diff --git a/.ci/bwcVersions b/.ci/bwcVersions
new file mode 100644
index 0000000000000..0ab6d00211d43
--- /dev/null
+++ b/.ci/bwcVersions
@@ -0,0 +1,10 @@
+BWC_VERSION:
+  - "7.0.0"
+  - "7.0.1"
+  - "7.1.0"
+  - "7.1.1"
+  - "7.2.0"
+  - "7.2.1"
+  - "7.3.0"
+  - "7.4.0"
+  - "8.0.0"
diff --git a/build.gradle b/build.gradle
index 5c1fe80668283..68a4b4d156e8c 100644
--- a/build.gradle
+++ b/build.gradle
@@ -29,16 +29,13 @@ import org.gradle.util.DistributionLocator
 import org.gradle.plugins.ide.eclipse.model.SourceFolder
 
 plugins {
-  id 'com.gradle.build-scan' version '2.2.1'
+  id 'com.gradle.build-scan' version '2.3'
   id 'base'
   id 'elasticsearch.global-build-info'
 }
-if (Boolean.valueOf(project.findProperty('org.elasticsearch.acceptScanTOS') ?: "false")) {
-  buildScan {
-    termsOfServiceUrl = 'https://gradle.com/terms-of-service'
-    termsOfServiceAgree = 'yes'
-  }
-}
+
+apply plugin: 'nebula.info-scm'
+apply from: 'gradle/build-scan.gradle'
 
 // common maven publishing configuration
 allprojects {
@@ -49,7 +46,6 @@ allprojects {
 
 BuildPlugin.configureRepositories(project)
 
-apply plugin: 'nebula.info-scm'
 String licenseCommit
 if (VersionProperties.elasticsearch.toString().endsWith('-SNAPSHOT')) {
   licenseCommit = scminfo.change ?: "master" // leniency for non git builds
@@ -107,6 +103,17 @@ subprojects {
  * logic in VersionUtils.java. */
 BwcVersions versions = new BwcVersions(file('server/src/main/java/org/elasticsearch/Version.java').readLines('UTF-8'))
 
+task updateCIBwcVersions() {
+  doLast {
+    File yml = file(".ci/bwcVersions")
+    yml.text = ""
+    yml << "BWC_VERSION:\n"
+    versions.indexCompatible.each {
+      yml << "  - \"$it\"\n"
+    }
+  }
+}
+
 // build metadata from previous build, contains eg hashes for bwc builds
 String buildMetadataValue = System.getenv('BUILD_METADATA')
 if (buildMetadataValue == null) {
@@ -150,6 +157,12 @@ task verifyVersions {
           .collect { Version.fromString(it) }
       )
     }
+    String ciYml = file(".ci/bwcVersions").text
+    bwcVersions.indexCompatible.each {
+      if (ciYml.contains("\"$it\"\n") == false) {
+        throw new Exception(".ci/bwcVersions is outdated, run `./gradlew updateCIBwcVersions` and check in the results");
+      }
+    }
   }
 }
 
@@ -160,8 +173,8 @@ task verifyVersions {
 * after the backport of the backcompat code is complete.
*/ -boolean bwc_tests_enabled = true -final String bwc_tests_disabled_issue = "" /* place a PR link here when committing bwc changes */ +boolean bwc_tests_enabled = false +final String bwc_tests_disabled_issue = "https://github.com/elastic/elasticsearch/pull/43197" if (bwc_tests_enabled == false) { if (bwc_tests_disabled_issue.isEmpty()) { throw new GradleException("bwc_tests_disabled_issue must be set when bwc_tests_enabled == false") diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 72af6b8c330f8..d84a0e33bec27 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -825,7 +825,7 @@ class BuildPlugin implements Plugin { test.executable = "${ext.get('runtimeJavaHome')}/bin/java" test.workingDir = project.file("${project.buildDir}/testrun/${test.name}") - test.maxParallelForks = project.rootProject.extensions.getByType(ExtraPropertiesExtension).get('defaultParallel') as Integer + test.maxParallelForks = System.getProperty('tests.jvms', project.rootProject.extensions.extraProperties.get('defaultParallel').toString()) as Integer test.exclude '**/*$*.class' diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy index 805a1b213e859..53a022a59052a 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy @@ -21,11 +21,9 @@ package org.elasticsearch.gradle.doc import org.elasticsearch.gradle.OS import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties -import org.elasticsearch.gradle.test.ClusterFormationTasks import org.elasticsearch.gradle.test.RestTestPlugin import org.gradle.api.Project import org.gradle.api.Task - /** * Sets up tests for documentation. */ @@ -38,7 +36,7 @@ public class DocsTestPlugin extends RestTestPlugin { super.apply(project) String distribution = System.getProperty('tests.distribution', 'default') // The distribution can be configured with -Dtests.distribution on the command line - project.testClusters.integTest.distribution = distribution.toUpperCase() + project.testClusters.integTest.testDistribution = distribution.toUpperCase() project.testClusters.integTest.nameCustomization = { it.replace("integTest", "node") } // Docs are published separately so no need to assemble project.tasks.assemble.enabled = false diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy index 439a60e6c3aaf..d89ed12b8167b 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy @@ -353,7 +353,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask { private void testSetup(Snippet snippet) { if (lastDocsPath == snippet.path) { - throw new InvalidUserDataException("$snippet: wasn't first") + throw new InvalidUserDataException("$snippet: wasn't first. 
TESTSETUP can only be used in the first snippet of a document.") } setupCurrent(snippet) current.println('---') diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/AntFixture.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/AntFixture.groovy index 8dcb862064ec9..32aca9e580839 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/AntFixture.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/AntFixture.groovy @@ -58,6 +58,9 @@ public class AntFixture extends AntTask implements Fixture { @Input boolean useShell = false + @Input + int maxWaitInSeconds = 30 + /** * A flag to indicate whether the fixture should be run in the foreground, or spawned. * It is protected so subclasses can override (eg RunTask). @@ -128,7 +131,7 @@ public class AntFixture extends AntTask implements Fixture { String failedProp = "failed${name}" // first wait for resources, or the failure marker from the wrapper script - ant.waitfor(maxwait: '30', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', timeoutproperty: failedProp) { + ant.waitfor(maxwait: maxWaitInSeconds, maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', timeoutproperty: failedProp) { or { resourceexists { file(file: failureMarker.toString()) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy index 9857a1bc29ed7..a73c44a2921bc 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy @@ -18,7 +18,7 @@ */ package org.elasticsearch.gradle.test -import org.elasticsearch.gradle.VersionProperties + import org.elasticsearch.gradle.testclusters.ElasticsearchCluster import org.elasticsearch.gradle.testclusters.TestClustersPlugin import org.gradle.api.DefaultTask @@ -26,7 +26,6 @@ import org.gradle.api.Task import org.gradle.api.execution.TaskExecutionAdapter import org.gradle.api.logging.Logger import org.gradle.api.logging.Logging -import org.gradle.api.specs.Specs import org.gradle.api.tasks.Copy import org.gradle.api.tasks.Input import org.gradle.api.tasks.TaskState @@ -37,7 +36,6 @@ import org.gradle.plugins.ide.idea.IdeaPlugin import java.nio.charset.StandardCharsets import java.nio.file.Files import java.util.stream.Stream - /** * A wrapper task around setting up a cluster and running rest tests. 
*/ @@ -69,8 +67,6 @@ class RestIntegTestTask extends DefaultTask { } else { project.testClusters { "$name" { - distribution = 'INTEG_TEST' - version = VersionProperties.elasticsearch javaHome = project.file(project.ext.runtimeJavaHome) } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy index 71c9d53467502..4910a452da694 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy @@ -324,9 +324,9 @@ class VagrantTestPlugin implements Plugin { fi if [ -z "\$SYSTEM_JAVA_HOME" ]; then - export SYSTEM_JAVA_HOME="${-> convertPath(project, linuxSystemJdk.toString()) }" + export SYSTEM_JAVA_HOME="${-> convertLinuxPath(project, linuxSystemJdk.toString()) }" fi - "${-> convertPath(project, linuxGradleJdk.toString()) }"/bin/java -cp "\$PACKAGING_TESTS/*" org.elasticsearch.packaging.VMTestRunner "\${test_args[@]}" + "${-> convertLinuxPath(project, linuxGradleJdk.toString()) }"/bin/java -cp "\$PACKAGING_TESTS/*" org.elasticsearch.packaging.VMTestRunner "\${test_args[@]}" """ } Task createWindowsRunnerScript = project.tasks.create('createWindowsRunnerScript', FileContentsTask) { @@ -335,14 +335,20 @@ class VagrantTestPlugin implements Plugin { // the use of $args rather than param() here is deliberate because the syntax for array (multivalued) parameters is likely // a little trappy for those unfamiliar with powershell contents """\ - if (\$args.Count -eq 0) { - \$testArgs = @("${-> project.extensions.esvagrant.testClass}") - } else { - \$testArgs = \$args + try { + if (\$args.Count -eq 0) { + \$testArgs = @("${-> project.extensions.esvagrant.testClass}") + } else { + \$testArgs = \$args + } + \$Env:SYSTEM_JAVA_HOME = "${-> convertWindowsPath(project, windowsSystemJdk.toString()) }" + & "${-> convertWindowsPath(project, windowsGradleJdk.toString()) }/bin/java" -cp "\$Env:PACKAGING_TESTS/*" org.elasticsearch.packaging.VMTestRunner @testArgs + exit \$LASTEXITCODE + } catch { + # catch if we have a failure to even run the script at all above, equivalent to set -e, sort of + echo "\$_.Exception.Message" + exit 1 } - \$Env:SYSTEM_JAVA_HOME = "${-> convertPath(project, windowsSystemJdk.toString()) }" - & "${-> convertPath(project, windowsGradleJdk.toString()) }"/bin/java -cp "\$Env:PACKAGING_TESTS/*" org.elasticsearch.packaging.VMTestRunner @testArgs - exit \$LASTEXITCODE """ } @@ -578,7 +584,7 @@ class VagrantTestPlugin implements Plugin { if (LINUX_BOXES.contains(box)) { Task batsPackagingTest = project.tasks.create("vagrant${boxTask}#batsPackagingTest", BatsOverVagrantTask) { - remoteCommand "export SYSTEM_JAVA_HOME=\"${-> convertPath(project, linuxSystemJdk.toString())}\"; " + BATS_TEST_COMMAND + remoteCommand "export SYSTEM_JAVA_HOME=\"${-> convertLinuxPath(project, linuxSystemJdk.toString())}\"; " + BATS_TEST_COMMAND boxName box environmentVars vagrantEnvVars dependsOn up, setupPackagingTest, linuxSystemJdk @@ -625,7 +631,7 @@ class VagrantTestPlugin implements Plugin { // https://github.com/hashicorp/vagrant/blob/9c299a2a357fcf87f356bb9d56e18a037a53d138/plugins/communicators/winrm/communicator.rb#L195-L225 // https://devops-collective-inc.gitbooks.io/secrets-of-powershell-remoting/content/manuscript/accessing-remote-computers.html javaPackagingTest.command = 'winrm' - javaPackagingTest.args = ['--elevated', '--command', 'powershell -File 
"$Env:PACKAGING_TESTS/run-tests.ps1"'] + javaPackagingTest.args = ['--elevated', '--command', '& "$Env:PACKAGING_TESTS/run-tests.ps1"; exit $LASTEXITCODE'] } TaskExecutionAdapter javaPackagingReproListener = createReproListener(project, javaPackagingTest.path) @@ -658,7 +664,10 @@ class VagrantTestPlugin implements Plugin { } // convert the given path from an elasticsearch repo path to a VM path - private String convertPath(Project project, String path) { + private String convertLinuxPath(Project project, String path) { return "/elasticsearch/" + project.rootDir.toPath().relativize(Paths.get(path)); } + private String convertWindowsPath(Project project, String path) { + return "C:\\elasticsearch\\" + project.rootDir.toPath().relativize(Paths.get(path)).toString().replace('/', '\\'); + } } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/Distribution.java b/buildSrc/src/main/java/org/elasticsearch/gradle/Distribution.java deleted file mode 100644 index 9cb3cc52dd09e..0000000000000 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/Distribution.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.gradle; - -public enum Distribution { - - INTEG_TEST("elasticsearch"), - DEFAULT("elasticsearch"), - OSS("elasticsearch-oss"); - - private final String artifactName; - - Distribution(String name) { - this.artifactName = name; - } - - public String getArtifactName() { - return artifactName; - } - - public String getGroup() { - if (this.equals(INTEG_TEST)) { - return "org.elasticsearch.distribution.integ-test-zip"; - } else { - return "org.elasticsearch.distribution." + name().toLowerCase(); - } - } - - public String getFileExtension() { - if (this.equals(INTEG_TEST)) { - return "zip"; - } else { - return OS.conditionalString() - .onUnix(() -> "tar.gz") - .onWindows(() -> "zip") - .supply(); - } - } - - public String getClassifier() { - if (this.equals(INTEG_TEST)) { - return ""; - } else { - return OS.conditional() - .onLinux(() -> "linux-x86_64") - .onWindows(() -> "windows-x86_64") - .onMac(() -> "darwin-x86_64") - .supply(); - } - } - - public String getLiveConfiguration() { - if (this.equals(INTEG_TEST)) { - return "integ-test-zip"; - } else { - return (this.equals(OSS) ? 
"oss-" : "") + OS.conditional() - .onLinux(() -> "linux-tar") - .onWindows(() -> "windows-zip") - .onMac(() -> "darwin-tar") - .supply(); - } - } - -} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java index 28748c00f46e1..5a3a4a277dda7 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java @@ -54,6 +54,7 @@ */ public class DistributionDownloadPlugin implements Plugin { + private static final String CONTAINER_NAME = "elasticsearch_distributions"; private static final String FAKE_IVY_GROUP = "elasticsearch-distribution"; private static final String DOWNLOAD_REPO_NAME = "elasticsearch-downloads"; @@ -67,7 +68,7 @@ public void apply(Project project) { Configuration extractedConfiguration = project.getConfigurations().create("es_distro_extracted_" + name); return new ElasticsearchDistribution(name, project.getObjects(), fileConfiguration, extractedConfiguration); }); - project.getExtensions().add("elasticsearch_distributions", distributionsContainer); + project.getExtensions().add(CONTAINER_NAME, distributionsContainer); setupDownloadServiceRepo(project); @@ -78,6 +79,11 @@ public void apply(Project project) { project.afterEvaluate(this::setupDistributions); } + @SuppressWarnings("unchecked") + public static NamedDomainObjectContainer getContainer(Project project) { + return (NamedDomainObjectContainer) project.getExtensions().getByName(CONTAINER_NAME); + } + // pkg private for tests void setupDistributions(Project project) { for (ElasticsearchDistribution distribution : distributionsContainer) { diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/ElasticsearchDistribution.java b/buildSrc/src/main/java/org/elasticsearch/gradle/ElasticsearchDistribution.java index ac18209a43373..53089f9b3d798 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/ElasticsearchDistribution.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/ElasticsearchDistribution.java @@ -20,7 +20,9 @@ package org.elasticsearch.gradle; import org.gradle.api.Buildable; +import org.gradle.api.Project; import org.gradle.api.artifacts.Configuration; +import org.gradle.api.file.FileTree; import org.gradle.api.model.ObjectFactory; import org.gradle.api.provider.Property; import org.gradle.api.tasks.TaskDependency; @@ -28,6 +30,7 @@ import java.io.File; import java.util.Iterator; import java.util.Locale; +import java.util.concurrent.Callable; public class ElasticsearchDistribution implements Buildable { @@ -65,7 +68,7 @@ public String toString() { } // package private to tests can use - static final Platform CURRENT_PLATFORM = OS.conditional() + public static final Platform CURRENT_PLATFORM = OS.conditional() .onLinux(() -> Platform.LINUX) .onWindows(() -> Platform.WINDOWS) .onMac(() -> Platform.DARWIN) @@ -90,6 +93,10 @@ public TaskDependency getBuildDependencies() { return configuration.getBuildDependencies(); } + public FileTree getFileTree(Project project) { + return project.fileTree((Callable) configuration::getSingleFile); + } + @Override public String toString() { return configuration.getSingleFile().toString(); diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java index 7ddec2b887ec6..4ed1543df1b55 100644 --- 
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java
@@ -18,8 +18,10 @@
  */
 package org.elasticsearch.gradle.precommit;
 
+import de.thetaphi.forbiddenapis.cli.CliMain;
 import org.apache.commons.io.output.NullOutputStream;
 import org.elasticsearch.gradle.JdkJarHellCheck;
+import org.elasticsearch.gradle.OS;
 import org.gradle.api.DefaultTask;
 import org.gradle.api.GradleException;
 import org.gradle.api.JavaVersion;
@@ -51,6 +53,7 @@
 import java.nio.file.Files;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.List;
 import java.util.Set;
 import java.util.TreeSet;
 import java.util.regex.Matcher;
@@ -69,6 +72,12 @@ public class ThirdPartyAuditTask extends DefaultTask {
     private static final Pattern VIOLATION_PATTERN = Pattern.compile(
         "\\s\\sin ([a-zA-Z0-9$.]+) \\(.*\\)"
     );
+    private static final int SIG_KILL_EXIT_VALUE = 137;
+    private static final List<Integer> EXPECTED_EXIT_CODES = Arrays.asList(
+        CliMain.EXIT_SUCCESS,
+        CliMain.EXIT_VIOLATION,
+        CliMain.EXIT_UNSUPPORTED_JDK
+    );
 
     private Set<String> missingClassExcludes = new TreeSet<>();
@@ -327,7 +336,7 @@ private String formatClassList(Set<String> classList) {
 
     private String runForbiddenAPIsCli() throws IOException {
         ByteArrayOutputStream errorOut = new ByteArrayOutputStream();
-        getProject().javaexec(spec -> {
+        ExecResult result = getProject().javaexec(spec -> {
             if (javaHome != null) {
                 spec.setExecutable(javaHome + "/bin/java");
             }
@@ -336,6 +345,7 @@ private String runForbiddenAPIsCli() throws IOException {
                 getRuntimeConfiguration(),
                 getProject().getConfigurations().getByName("compileOnly")
             );
+            spec.jvmArgs("-Xmx1g");
             spec.setMain("de.thetaphi.forbiddenapis.cli.CliMain");
             spec.args(
                 "-f", getSignatureFile().getAbsolutePath(),
@@ -348,10 +358,18 @@ private String runForbiddenAPIsCli() throws IOException {
             }
             spec.setIgnoreExitValue(true);
         });
+        if (OS.current().equals(OS.LINUX) && result.getExitValue() == SIG_KILL_EXIT_VALUE) {
+            throw new IllegalStateException(
+                "Third party audit was killed by SIGKILL, could be a victim of the Linux OOM killer"
+            );
+        }
         final String forbiddenApisOutput;
         try (ByteArrayOutputStream outputStream = errorOut) {
             forbiddenApisOutput = outputStream.toString(StandardCharsets.UTF_8.name());
         }
+        if (EXPECTED_EXIT_CODES.contains(result.getExitValue()) == false) {
+            throw new IllegalStateException("Forbidden APIs cli failed: " + forbiddenApisOutput);
+        }
         return forbiddenApisOutput;
     }
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/test/RestTestRunnerTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/test/RestTestRunnerTask.java
index eff05f64f9c33..95040af9809e4 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/test/RestTestRunnerTask.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/test/RestTestRunnerTask.java
@@ -8,7 +8,7 @@
 import java.util.ArrayList;
 import java.util.Collection;
 
-import static org.elasticsearch.gradle.Distribution.INTEG_TEST;
+import static org.elasticsearch.gradle.testclusters.TestDistribution.INTEG_TEST;
 
 /**
  * Customized version of Gradle {@link Test} task which tracks a collection of {@link ElasticsearchCluster} as a task input.
We must do this @@ -23,7 +23,7 @@ public class RestTestRunnerTask extends Test { public RestTestRunnerTask() { super(); this.getOutputs().doNotCacheIf("Build cache is only enabled for tests against clusters using the 'integ-test' distribution", - task -> clusters.stream().flatMap(c -> c.getNodes().stream()).anyMatch(n -> n.getDistribution() != INTEG_TEST)); + task -> clusters.stream().flatMap(c -> c.getNodes().stream()).anyMatch(n -> n.getTestDistribution() != INTEG_TEST)); } @Nested diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java index c343f56525aea..62621e51396e6 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java @@ -18,10 +18,9 @@ */ package org.elasticsearch.gradle.testclusters; -import org.elasticsearch.gradle.Distribution; +import org.elasticsearch.gradle.ElasticsearchDistribution; import org.elasticsearch.gradle.FileSupplier; import org.elasticsearch.gradle.PropertyNormalization; -import org.elasticsearch.gradle.Version; import org.elasticsearch.gradle.http.WaitForHttpResource; import org.gradle.api.Named; import org.gradle.api.NamedDomainObjectContainer; @@ -43,7 +42,6 @@ import java.util.Objects; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.BiConsumer; import java.util.function.Function; import java.util.function.Predicate; import java.util.function.Supplier; @@ -60,22 +58,23 @@ public class ElasticsearchCluster implements TestClusterConfiguration, Named { private final String clusterName; private final NamedDomainObjectContainer nodes; private final File workingDirBase; - private final File artifactsExtractDir; + private final Function distributionFactory; private final LinkedHashMap> waitConditions = new LinkedHashMap<>(); private final Project project; - public ElasticsearchCluster(String path, String clusterName, Project project, File artifactsExtractDir, File workingDirBase) { + public ElasticsearchCluster(String path, String clusterName, Project project, + Function distributionFactory, File workingDirBase) { this.path = path; this.clusterName = clusterName; this.project = project; + this.distributionFactory = distributionFactory; this.workingDirBase = workingDirBase; - this.artifactsExtractDir = artifactsExtractDir; this.nodes = project.container(ElasticsearchNode.class); this.nodes.add( new ElasticsearchNode( path, clusterName + "-0", - project, artifactsExtractDir, workingDirBase - ) + project, workingDirBase, distributionFactory.apply(0) + ) ); // configure the cluster name eagerly so nodes know about it this.nodes.all((node) -> node.defaultConfig.put("cluster.name", safeName(clusterName))); @@ -98,8 +97,8 @@ public void setNumberOfNodes(int numberOfNodes) { for (int i = nodes.size() ; i < numberOfNodes; i++) { this.nodes.add(new ElasticsearchNode( - path, clusterName + "-" + i, project, artifactsExtractDir, workingDirBase - )); + path, clusterName + "-" + i, project, workingDirBase, distributionFactory.apply(i) + )); } } @@ -121,8 +120,8 @@ public void setVersion(String version) { } @Override - public void setDistribution(Distribution distribution) { - nodes.all(each -> each.setDistribution(distribution)); + public void setTestDistribution(TestDistribution distribution) { + nodes.all(each -> 
each.setTestDistribution(distribution)); } @Override @@ -248,7 +247,7 @@ public void start() { for (ElasticsearchNode node : nodes) { if (nodeNames != null) { // Can only configure master nodes if we have node names defined - if (Version.fromString(node.getVersion()).getMajor() >= 7) { + if (node.getVersion().getMajor() >= 7) { node.defaultConfig.put("cluster.initial_master_nodes", "[" + nodeNames + "]"); node.defaultConfig.put("discovery.seed_providers", "file"); node.defaultConfig.put("discovery.seed_hosts", "[]"); @@ -338,12 +337,6 @@ public boolean isProcessAlive() { return nodes.stream().noneMatch(node -> node.isProcessAlive() == false); } - void eachVersionedDistribution(BiConsumer consumer) { - nodes.forEach(each -> { - consumer.accept(each.getVersion(), each.getDistribution()); - }); - } - public ElasticsearchNode singleNode() { if (nodes.size() != 1) { throw new IllegalStateException( diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java index 025cee2ffafb1..6ae9942ddfadf 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.gradle.testclusters; -import org.elasticsearch.gradle.Distribution; +import org.elasticsearch.gradle.ElasticsearchDistribution; import org.elasticsearch.gradle.FileSupplier; import org.elasticsearch.gradle.LazyPropertyList; import org.elasticsearch.gradle.LazyPropertyMap; @@ -26,6 +26,7 @@ import org.elasticsearch.gradle.OS; import org.elasticsearch.gradle.PropertyNormalization; import org.elasticsearch.gradle.Version; +import org.elasticsearch.gradle.VersionProperties; import org.elasticsearch.gradle.http.WaitForHttpResource; import org.gradle.api.Action; import org.gradle.api.Named; @@ -37,6 +38,7 @@ import org.gradle.api.tasks.Input; import org.gradle.api.tasks.InputFile; import org.gradle.api.tasks.InputFiles; +import org.gradle.api.tasks.Internal; import org.gradle.api.tasks.Nested; import org.gradle.api.tasks.PathSensitive; import org.gradle.api.tasks.PathSensitivity; @@ -46,11 +48,13 @@ import java.io.File; import java.io.IOException; import java.io.InputStream; +import java.io.LineNumberReader; import java.io.UncheckedIOException; import java.net.URI; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.Paths; import java.nio.file.StandardCopyOption; import java.nio.file.StandardOpenOption; import java.time.Instant; @@ -61,6 +65,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; +import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Objects; @@ -82,6 +87,7 @@ public class ElasticsearchNode implements TestClusterConfiguration { private static final Logger LOGGER = Logging.getLogger(ElasticsearchNode.class); private static final int ES_DESTROY_TIMEOUT = 20; private static final TimeUnit ES_DESTROY_TIMEOUT_UNIT = TimeUnit.SECONDS; + private static final int NODE_UP_TIMEOUT = 2; private static final TimeUnit NODE_UP_TIMEOUT_UNIT = TimeUnit.MINUTES; private static final int ADDITIONAL_CONFIG_TIMEOUT = 15; @@ -89,13 +95,20 @@ public class ElasticsearchNode implements TestClusterConfiguration { private static final List OVERRIDABLE_SETTINGS = Arrays.asList( "path.repo", "discovery.seed_providers" + + ); + + private 
static final int TAIL_LOG_MESSAGES_COUNT = 40; + private static final List MESSAGES_WE_DONT_CARE_ABOUT = Arrays.asList( + "Option UseConcMarkSweepGC was deprecated", + "is a pre-release version of Elasticsearch", + "max virtual memory areas vm.max_map_count" ); private final String path; private final String name; private final Project project; private final AtomicBoolean configurationFrozen = new AtomicBoolean(false); - private final Path artifactsExtractDir; private final Path workingDir; private final LinkedHashMap> waitConditions = new LinkedHashMap<>(); @@ -121,19 +134,21 @@ public class ElasticsearchNode implements TestClusterConfiguration { private final Path esStderrFile; private final Path tmpDir; - private Distribution distribution; private String version; + private TestDistribution testDistribution; + private ElasticsearchDistribution distribution; private File javaHome; private volatile Process esProcess; private Function nameCustomization = Function.identity(); private boolean isWorkingDirConfigured = false; - ElasticsearchNode(String path, String name, Project project, File artifactsExtractDir, File workingDirBase) { + ElasticsearchNode(String path, String name, Project project, File workingDirBase, + ElasticsearchDistribution distribution) { this.path = path; this.name = name; this.project = project; - this.artifactsExtractDir = artifactsExtractDir.toPath(); this.workingDir = workingDirBase.toPath().resolve(safeName(name)).toAbsolutePath(); + this.distribution = distribution; confPathRepo = workingDir.resolve("repo"); configFile = workingDir.resolve("config/elasticsearch.yml"); confPathData = workingDir.resolve("data"); @@ -144,15 +159,18 @@ public class ElasticsearchNode implements TestClusterConfiguration { esStderrFile = confPathLogs.resolve("es.stderr.log"); tmpDir = workingDir.resolve("tmp"); waitConditions.put("ports files", this::checkPortsFilesExistWithDelay); + + setTestDistribution(TestDistribution.INTEG_TEST); + setVersion(VersionProperties.getElasticsearch()); } public String getName() { return nameCustomization.apply(name); } - @Input - public String getVersion() { - return version; + @Internal + public Version getVersion() { + return distribution.getVersion(); } @Override @@ -160,18 +178,35 @@ public void setVersion(String version) { requireNonNull(version, "null version passed when configuring test cluster `" + this + "`"); checkFrozen(); this.version = version; + this.distribution.setVersion(version); } - @Input - public Distribution getDistribution() { + @Internal + public TestDistribution getTestDistribution() { + return testDistribution; + } + + // package private just so test clusters plugin can access to wire up task dependencies + @Internal + ElasticsearchDistribution getDistribution() { return distribution; } @Override - public void setDistribution(Distribution distribution) { - requireNonNull(distribution, "null distribution passed when configuring test cluster `" + this + "`"); + public void setTestDistribution(TestDistribution testDistribution) { + requireNonNull(testDistribution, "null distribution passed when configuring test cluster `" + this + "`"); checkFrozen(); - this.distribution = distribution; + this.testDistribution = testDistribution; + if (testDistribution == TestDistribution.INTEG_TEST) { + this.distribution.setType(ElasticsearchDistribution.Type.INTEG_TEST_ZIP); + } else { + this.distribution.setType(ElasticsearchDistribution.Type.ARCHIVE); + if (testDistribution == TestDistribution.DEFAULT) { + 
this.distribution.setFlavor(ElasticsearchDistribution.Flavor.DEFAULT);
+            } else {
+                this.distribution.setFlavor(ElasticsearchDistribution.Flavor.OSS);
+            }
+        }
     }
 
     @Override
@@ -277,7 +312,7 @@ public Path getConfigDir() {
     @Override
     public void freeze() {
         requireNonNull(distribution, "null distribution passed when configuring test cluster `" + this + "`");
-        requireNonNull(version, "null version passed when configuring test cluster `" + this + "`");
+        requireNonNull(getVersion(), "null version passed when configuring test cluster `" + this + "`");
         requireNonNull(javaHome, "null javaHome passed when configuring test cluster `" + this + "`");
         LOGGER.info("Locking configuration of `{}`", this);
         configurationFrozen.set(true);
@@ -320,7 +355,7 @@ public synchronized void start() {
         try {
             if (isWorkingDirConfigured == false) {
                 logToProcessStdout("Configuring working directory: " + workingDir);
-                // Only configure working dir once so we don't loose data on restarts
+                // Only configure working dir once so we don't lose data on restarts
                 isWorkingDirConfigured = true;
                 createWorkingDir(getExtractedDistributionDir());
             }
@@ -430,10 +465,11 @@ private void copyExtraConfigFiles() {
     }
 
     private void installModules() {
-        if (distribution == Distribution.INTEG_TEST) {
+        if (testDistribution == TestDistribution.INTEG_TEST) {
             logToProcessStdout("Installing " + modules.size() + " modules");
             for (File module : modules) {
-                Path destination = workingDir.resolve("modules").resolve(module.getName().replace(".zip", "").replace("-" + version, ""));
+                Path destination = workingDir.resolve("modules").resolve(module.getName().replace(".zip", "")
+                    .replace("-" + version, ""));
 
                 // only install modules that are not already bundled with the integ-test distribution
                 if (Files.exists(destination) == false) {
@@ -693,14 +729,73 @@ private void logProcessInfo(String prefix, ProcessHandle.Info info) {
     }
 
     private void logFileContents(String description, Path from) {
-        LOGGER.error("{} `{}`", description, this);
-        try (Stream<String> lines = Files.lines(from, StandardCharsets.UTF_8)) {
-            lines
-                .map(line -> " " + line)
-                .forEach(LOGGER::error);
+        final Map<String, Integer> errorsAndWarnings = new LinkedHashMap<>();
+        LinkedList<String> ring = new LinkedList<>();
+        try (LineNumberReader reader = new LineNumberReader(Files.newBufferedReader(from))) {
+            for (String line = reader.readLine(); line != null ; line = reader.readLine()) {
+                final String lineToAdd;
+                if (ring.isEmpty()) {
+                    lineToAdd = line;
+                } else {
+                    if (line.startsWith("[")) {
+                        lineToAdd = line;
+                        // check to see if the previous message (possibly combined from multiple lines) was an error or
+                        // warning as we want to show all of them
+                        String previousMessage = normalizeLogLine(ring.getLast());
+                        if (MESSAGES_WE_DONT_CARE_ABOUT.stream().noneMatch(previousMessage::contains) &&
+                            (previousMessage.contains("ERROR") || previousMessage.contains("WARN"))) {
+                            errorsAndWarnings.put(
+                                previousMessage,
+                                errorsAndWarnings.getOrDefault(previousMessage, 0) + 1
+                            );
+                        }
+                    } else {
+                        // We combine multi line log messages to make sure we never break exceptions apart
+                        lineToAdd = ring.removeLast() + "\n" + line;
+                    }
+                }
+                ring.add(lineToAdd);
+                if (ring.size() >= TAIL_LOG_MESSAGES_COUNT) {
+                    ring.removeFirst();
+                }
+            }
         } catch (IOException e) {
             throw new UncheckedIOException("Failed to tail log " + this, e);
         }
+
+        if (errorsAndWarnings.isEmpty() == false || ring.isEmpty() == false) {
+            LOGGER.error("\n=== {} `{}` ===", description, this);
+        }
+        if (errorsAndWarnings.isEmpty() == false) {
+            LOGGER.lifecycle("\n» ↓ 
errors and warnings from " + from + " ↓"); + errorsAndWarnings.forEach((message, count) -> { + LOGGER.lifecycle("» " + message.replace("\n", "\n» ")); + if (count > 1) { + LOGGER.lifecycle("» ↑ repeated " + count + " times ↑"); + } + }); + } + + ring.removeIf(line -> MESSAGES_WE_DONT_CARE_ABOUT.stream().anyMatch(line::contains)); + + if (ring.isEmpty() == false) { + LOGGER.lifecycle("» ↓ last " + TAIL_LOG_MESSAGES_COUNT + " non error or warning messages from " + from + " ↓"); + ring.forEach(message -> { + if (errorsAndWarnings.containsKey(normalizeLogLine(message)) == false) { + LOGGER.lifecycle("» " + message.replace("\n", "\n» ")); + } + }); + } + } + + private String normalizeLogLine(String line) { + if (line.contains("ERROR")) { + return line.substring(line.indexOf("ERROR")); + } + if (line.contains("WARN")) { + return line.substring(line.indexOf("WARN")); + } + return line; } private void waitForProcessToExit(ProcessHandle processHandle) { @@ -780,7 +875,7 @@ private void createConfiguration() { defaultConfig.put("node.attr.testattr", "test"); defaultConfig.put("node.portsfile", "true"); defaultConfig.put("http.port", "0"); - if (Version.fromString(version).onOrAfter(Version.fromString("6.7.0"))) { + if (getVersion().onOrAfter(Version.fromString("6.7.0"))) { defaultConfig.put("transport.port", "0"); } else { defaultConfig.put("transport.tcp.port", "0"); @@ -790,13 +885,13 @@ private void createConfiguration() { defaultConfig.put("cluster.routing.allocation.disk.watermark.high", "1b"); // increase script compilation limit since tests can rapid-fire script compilations defaultConfig.put("script.max_compilations_rate", "2048/1m"); - if (Version.fromString(version).getMajor() >= 6) { + if (getVersion().getMajor() >= 6) { defaultConfig.put("cluster.routing.allocation.disk.watermark.flood_stage", "1b"); } // Temporarily disable the real memory usage circuit breaker. It depends on real memory usage which we have no full control // over and the REST client will not retry on circuit breaking exceptions yet (see #31986 for details). Once the REST client // can retry on circuit breaking exceptions, we can revert again to the default configuration. - if (Version.fromString(version).getMajor() >= 7) { + if (getVersion().getMajor() >= 7) { defaultConfig.put("indices.breaker.total.use_real_memory", "false"); } // Don't wait for state, just start up quickly. 
This will also allow new and old nodes in the BWC case to become the master @@ -868,7 +963,7 @@ private List readPortsFile(Path file) throws IOException { } private Path getExtractedDistributionDir() { - return artifactsExtractDir.resolve(distribution.getGroup()).resolve("elasticsearch-" + getVersion()); + return Paths.get(distribution.getExtracted().toString()).resolve("elasticsearch-" + version); } private List getInstalledFileSet(Action filter) { diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java index f290b4aa91b8f..8b8c980f523f4 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.gradle.testclusters; -import org.elasticsearch.gradle.Distribution; import org.elasticsearch.gradle.FileSupplier; import org.elasticsearch.gradle.PropertyNormalization; import org.gradle.api.logging.Logging; @@ -39,7 +38,7 @@ public interface TestClusterConfiguration { void setVersion(String version); - void setDistribution(Distribution distribution); + void setTestDistribution(TestDistribution distribution); void plugin(URI plugin); diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java index 3c50108d9a179..fd1ab7d02c96c 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java @@ -19,21 +19,15 @@ package org.elasticsearch.gradle.testclusters; import groovy.lang.Closure; -import org.elasticsearch.gradle.BwcVersions; -import org.elasticsearch.gradle.Version; +import org.elasticsearch.gradle.DistributionDownloadPlugin; +import org.elasticsearch.gradle.ElasticsearchDistribution; import org.elasticsearch.gradle.test.RestTestRunnerTask; -import org.elasticsearch.gradle.tool.Boilerplate; -import org.gradle.api.Action; import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Plugin; import org.gradle.api.Project; import org.gradle.api.Task; -import org.gradle.api.artifacts.Configuration; -import org.gradle.api.artifacts.repositories.MavenArtifactRepository; -import org.gradle.api.credentials.HttpHeaderCredentials; import org.gradle.api.execution.TaskActionListener; import org.gradle.api.execution.TaskExecutionListener; -import org.gradle.api.file.FileTree; import org.gradle.api.logging.Logger; import org.gradle.api.logging.Logging; import org.gradle.api.plugins.ExtraPropertiesExtension; @@ -53,8 +47,6 @@ public class TestClustersPlugin implements Plugin { private static final String LIST_TASK_NAME = "listTestClusters"; public static final String EXTENSION_NAME = "testClusters"; - private static final String HELPER_CONFIGURATION_PREFIX = "testclusters"; - private static final String SYNC_ARTIFACTS_TASK_NAME = "syncTestClustersArtifacts"; private static final Logger logger = Logging.getLogger(TestClustersPlugin.class); private static final String TESTCLUSTERS_INSPECT_FAILURE = "testclusters.inspect.failure"; @@ -64,13 +56,9 @@ public class TestClustersPlugin implements Plugin { private final Set runningClusters = new HashSet<>(); private final Boolean allowClusterToSurvive = 
Boolean.valueOf(System.getProperty(TESTCLUSTERS_INSPECT_FAILURE, "false")); - public static String getHelperConfigurationName(String version) { - return HELPER_CONFIGURATION_PREFIX + "-" + version; - } - @Override public void apply(Project project) { - Project rootProject = project.getRootProject(); + project.getPlugins().apply(DistributionDownloadPlugin.class); // enable the DSL to describe clusters NamedDomainObjectContainer container = createTestClustersContainerExtension(project); @@ -94,17 +82,11 @@ public void apply(Project project) { // After each task we determine if there are clusters that are no longer needed. configureStopClustersHook(project); - - // Since we have everything modeled in the DSL, add all the required dependencies e.x. the distribution to the - // configuration so the user doesn't have to repeat this. - autoConfigureClusterDependencies(project, rootProject, container); - } - - private static File getExtractDir(Project project) { - return new File(project.getRootProject().getBuildDir(), "testclusters/extract/"); } private NamedDomainObjectContainer createTestClustersContainerExtension(Project project) { + NamedDomainObjectContainer distros = DistributionDownloadPlugin.getContainer(project); + // Create an extensions that allows describing clusters NamedDomainObjectContainer container = project.container( ElasticsearchCluster.class, @@ -112,7 +94,7 @@ private NamedDomainObjectContainer createTestClustersConta project.getPath(), name, project, - new File(project.getRootProject().getBuildDir(), "testclusters/extract"), + i -> distros.create(name + "-" + i), new File(project.getBuildDir(), "testclusters") ) ); @@ -153,9 +135,9 @@ public void doCall(ElasticsearchCluster cluster) { "Task, but got: " + thisObject.getClass()); } usedClusters.computeIfAbsent(task, k -> new ArrayList<>()).add(cluster); - ((Task) thisObject).dependsOn( - project.getRootProject().getTasks().getByName(SYNC_ARTIFACTS_TASK_NAME) - ); + for (ElasticsearchNode node : cluster.getNodes()) { + ((Task) thisObject).dependsOn(node.getDistribution().getExtracted()); + } if (thisObject instanceof RestTestRunnerTask) { ((RestTestRunnerTask) thisObject).testCluster(cluster); } @@ -285,145 +267,4 @@ private void stopCluster(ElasticsearchCluster cluster, boolean taskFailed) { } cluster.stop(taskFailed); } - - /** - * Boilerplate to get testClusters container extension - * - * Equivalent to project.testClusters in the DSL - */ - @SuppressWarnings("unchecked") - public static NamedDomainObjectContainer getNodeExtension(Project project) { - return (NamedDomainObjectContainer) - project.getExtensions().getByName(EXTENSION_NAME); - } - - private static void autoConfigureClusterDependencies( - Project project, - Project rootProject, - NamedDomainObjectContainer container - ) { - // Download integ test distribution from maven central - MavenArtifactRepository mavenCentral = project.getRepositories().mavenCentral(); - mavenCentral.content(spec -> { - spec.includeGroupByRegex("org\\.elasticsearch\\.distribution\\..*"); - }); - - // Other distributions from the download service - project.getRepositories().add( - project.getRepositories().ivy(spec -> { - spec.setUrl("https://artifacts.elastic.co/downloads"); - spec.patternLayout(p -> p.artifact("elasticsearch/[module]-[revision](-[classifier]).[ext]")); - HttpHeaderCredentials headerConfig = spec.getCredentials(HttpHeaderCredentials.class); - headerConfig.setName("X-Elastic-No-KPI"); - headerConfig.setValue("1"); - spec.content(c-> 
c.includeGroupByRegex("org\\.elasticsearch\\.distribution\\..*")); - }) - ); - - // We have a single task to sync the helper configuration to "artifacts dir" - // the clusters will look for artifacts there based on the naming conventions. - // Tasks that use a cluster will add this as a dependency automatically so it's guaranteed to run early in - // the build. - Boilerplate.maybeCreate(rootProject.getTasks(), SYNC_ARTIFACTS_TASK_NAME, onCreate -> { - onCreate.getOutputs().dir(getExtractDir(rootProject)); - onCreate.getInputs().files( - project.getRootProject().getConfigurations().matching(conf -> conf.getName().startsWith(HELPER_CONFIGURATION_PREFIX)) - ); - onCreate.dependsOn(project.getRootProject().getConfigurations() - .matching(conf -> conf.getName().startsWith(HELPER_CONFIGURATION_PREFIX)) - ); - // NOTE: Gradle doesn't allow a lambda here ( fails at runtime ) - onCreate.doFirst(new Action() { - @Override - public void execute(Task task) { - // Clean up the extract dir first to make sure we have no stale files from older - // previous builds of the same distribution - project.delete(getExtractDir(rootProject)); - } - }); - onCreate.doLast(new Action() { - @Override - public void execute(Task task) { - project.getRootProject().getConfigurations() - .matching(config -> config.getName().startsWith(HELPER_CONFIGURATION_PREFIX)) - .forEach(config -> project.copy(spec -> - config.getResolvedConfiguration() - .getResolvedArtifacts() - .forEach(resolvedArtifact -> { - final FileTree files; - File file = resolvedArtifact.getFile(); - if (file.getName().endsWith(".zip")) { - files = project.zipTree(file); - } else if (file.getName().endsWith("tar.gz")) { - files = project.tarTree(file); - } else { - throw new IllegalArgumentException("Can't extract " + file + " unknown file extension"); - } - logger.info("Extracting {}@{}", resolvedArtifact, config); - spec.from(files, s -> s.into(resolvedArtifact.getModuleVersion().getId().getGroup())); - spec.into(getExtractDir(project)); - })) - ); - } - }); - }); - - // When the project evaluated we know of all tasks that use clusters. - // Each of these have to depend on the artifacts being synced. - // We need afterEvaluate here despite the fact that container is a domain object, we can't implement this with - // all because fields can change after the fact. - project.afterEvaluate(ip -> container.forEach(esCluster -> - esCluster.eachVersionedDistribution((version, distribution) -> { - Configuration helperConfiguration = Boilerplate.maybeCreate( - rootProject.getConfigurations(), - getHelperConfigurationName(version), - onCreate -> - // We use a single configuration on the root project to resolve all testcluster dependencies ( like distros ) - // at once, only once without the need to repeat it for each project. This pays off assuming that most - // projects use the same dependencies. 
- onCreate.setDescription( - "Internal helper configuration used by cluster configuration to download " + - "ES distributions and plugins for " + version - ) - ); - BwcVersions.UnreleasedVersionInfo unreleasedInfo; - final List unreleased; - { - ExtraPropertiesExtension extraProperties = project.getExtensions().getExtraProperties(); - if (extraProperties.has("bwcVersions")) { - Object bwcVersionsObj = extraProperties.get("bwcVersions"); - if (bwcVersionsObj instanceof BwcVersions == false) { - throw new IllegalStateException("Expected project.bwcVersions to be of type VersionCollection " + - "but instead it was " + bwcVersionsObj.getClass()); - } - final BwcVersions bwcVersions = (BwcVersions) bwcVersionsObj; - unreleased = ((BwcVersions) bwcVersionsObj).getUnreleased(); - unreleasedInfo = bwcVersions.unreleasedInfo(Version.fromString(version)); - } else { - logger.info("No version information available, assuming all versions used are released"); - unreleased = Collections.emptyList(); - unreleasedInfo = null; - } - } - if (unreleased.contains(Version.fromString(version))) { - Map projectNotation = new HashMap<>(); - projectNotation.put("path", unreleasedInfo.gradleProjectPath); - projectNotation.put("configuration", distribution.getLiveConfiguration()); - rootProject.getDependencies().add( - helperConfiguration.getName(), - project.getDependencies().project(projectNotation) - ); - } else { - rootProject.getDependencies().add( - helperConfiguration.getName(), - distribution.getGroup() + ":" + - distribution.getArtifactName() + ":" + - version + - (distribution.getClassifier().isEmpty() ? "" : ":" + distribution.getClassifier()) + "@" + - distribution.getFileExtension()); - - } - }))); - } - } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestDistribution.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestDistribution.java new file mode 100644 index 0000000000000..61419835b189f --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestDistribution.java @@ -0,0 +1,28 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.gradle.testclusters; + +/** + * An enumeration of the distributions that may be used in test clusters. 
+ */ +public enum TestDistribution { + INTEG_TEST, + DEFAULT, + OSS +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java index 56774b9b30b11..556e938875e26 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java @@ -122,7 +122,7 @@ public void apply(Project project) { extension.fixtures .matching(fixtureProject -> fixtureProject.equals(project) == false) - .all(fixtureProject -> project.evaluationDependsOn(fixtureProject.getPath())); + .all(fixtureProject -> project.evaluationDependsOn(fixtureProject.getPath())); conditionTaskByType(tasks, extension, Test.class); conditionTaskByType(tasks, extension, getTaskClass("org.elasticsearch.gradle.test.RestIntegTestTask")); diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/DistributionDownloadPluginTests.java b/buildSrc/src/test/java/org/elasticsearch/gradle/DistributionDownloadPluginTests.java index 69169d13d1977..c32d8ad81a97c 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/DistributionDownloadPluginTests.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/DistributionDownloadPluginTests.java @@ -188,9 +188,7 @@ private void assertDistroError(Project project, String name, String version, Typ private ElasticsearchDistribution createDistro(Project project, String name, String version, Type type, Platform platform, Flavor flavor, Boolean bundledJdk) { - @SuppressWarnings("unchecked") - NamedDomainObjectContainer distros = - (NamedDomainObjectContainer) project.getExtensions().getByName("elasticsearch_distributions"); + NamedDomainObjectContainer distros = DistributionDownloadPlugin.getContainer(project); return distros.create(name, distro -> { if (version != null) { distro.setVersion(version); diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/EmptyDirTaskTests.java b/buildSrc/src/test/java/org/elasticsearch/gradle/EmptyDirTaskTests.java index 9cb49e8efd418..517bf3dfc99e7 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/EmptyDirTaskTests.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/EmptyDirTaskTests.java @@ -21,6 +21,8 @@ import java.io.File; import java.io.IOException; +import com.carrotsearch.randomizedtesting.RandomizedTest; +import org.apache.tools.ant.taskdefs.condition.Os; import org.elasticsearch.gradle.test.GradleUnitTestCase; import org.gradle.api.Project; import org.gradle.testfixtures.ProjectBuilder; @@ -49,6 +51,8 @@ public void testCreateEmptyDir() throws Exception { } public void testCreateEmptyDirNoPermissions() throws Exception { + RandomizedTest.assumeFalse("Functionality is Unix specific", Os.isFamily(Os.FAMILY_WINDOWS)); + Project project = ProjectBuilder.builder().build(); EmptyDirTask emptyDirTask = project.getTasks().create("emptyDirTask", EmptyDirTask.class); emptyDirTask.setDirMode(0000); diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTaskIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTaskIT.java index b14b3538fc276..a57d6da036583 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTaskIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTaskIT.java @@ -48,27 +48,7 @@ public void testWithEmptyRules() { "-PcompileOnlyGroup=other.gradle:broken-log4j", 
"-PcompileOnlyVersion=0.0.1", "-PcompileGroup=other.gradle:dummy-io", "-PcompileVersion=0.0.1" ) - .build(); - - assertTaskSuccessful(result, ":empty"); - - result = getGradleRunner("thirdPartyAudit") - .withArguments("empty", "-s", - "-PcompileOnlyGroup=other.gradle:broken-log4j", "-PcompileOnlyVersion=0.0.1", - "-PcompileGroup=other.gradle:dummy-io", "-PcompileVersion=0.0.1" - ) - .build(); - - assertTaskUpToDate(result, ":empty"); - - result = getGradleRunner("thirdPartyAudit") - .withArguments("empty", "-s", - "-PcompileOnlyGroup=other.gradle:broken-log4j", "-PcompileOnlyVersion=0.0.1", - "-PcompileGroup=other.gradle:dummy-io", "-PcompileVersion=0.0.2" - ) - .build(); - - assertTaskSuccessful(result, ":empty"); + .buildAndFail(); } public void testViolationFoundAndCompileOnlyIgnored() { diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java index a59f54e132073..6859d9ee85869 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java @@ -68,9 +68,9 @@ public void testUseClusterByTwo() { public void testUseClusterByUpToDateTask() { // Run it once, ignoring the result and again to make sure it's considered up to date. // Gradle randomly considers tasks without inputs and outputs as as up-to-date or success on the first run - getTestClustersRunner(":upToDate1", ":upToDate2").build(); - BuildResult result = getTestClustersRunner(":upToDate1", ":upToDate2").build(); - assertTaskUpToDate(result, ":upToDate1", ":upToDate2"); + getTestClustersRunner(":upToDate1").build(); + BuildResult result = getTestClustersRunner(":upToDate1").build(); + assertTaskUpToDate(result, ":upToDate1"); assertNotStarted(result); } diff --git a/buildSrc/src/testKit/testclusters/build.gradle b/buildSrc/src/testKit/testclusters/build.gradle index e4f912a3d7a63..4296965a73fb9 100644 --- a/buildSrc/src/testKit/testclusters/build.gradle +++ b/buildSrc/src/testKit/testclusters/build.gradle @@ -26,7 +26,7 @@ allprojects { all -> all.testClusters { myTestCluster { - distribution = 'DEFAULT' + testDistribution = 'DEFAULT' version = System.getProperty("test.version_under_test") javaHome = file(System.getProperty('java.home')) plugin file("${project(":dummyPlugin").buildDir}/distributions/dummy-${System.getProperty("test.version_under_test")}.zip") @@ -54,23 +54,23 @@ allprojects { all -> testClusters { multiNode { version = System.getProperty("test.version_under_test") - distribution = 'DEFAULT' + testDistribution = 'DEFAULT' javaHome = file(System.getProperty('java.home')) numberOfNodes = 3 } releasedVersionDefault { version = "7.0.0" - distribution = 'DEFAULT' + testDistribution = 'DEFAULT' javaHome = file(System.getProperty('java.home')) } releasedVersionOSS { version = "7.0.0" - distribution = 'OSS' + testDistribution = 'OSS' javaHome = file(System.getProperty('java.home')) } releasedVersionIntegTest { version = "7.0.0" - distribution = 'INTEG_TEST' + testDistribution = 'INTEG_TEST' javaHome = file(System.getProperty('java.home')) } } @@ -105,10 +105,10 @@ task printLog { task upToDate1 { useCluster testClusters.myTestCluster -} - -task upToDate2 { - useCluster testClusters.myTestCluster + outputs.upToDateWhen { true } + doLast { + println "Some task action" + } } task skipped1 { @@ -137,6 +137,6 @@ task illegalConfigAlter { useCluster 
testClusters.myTestCluster doFirst { println "Going to alter configuration after use" - testClusters.myTestCluster.distribution = 'OSS' + testClusters.myTestCluster.testDistribution = 'OSS' } } diff --git a/buildSrc/src/testKit/thirdPartyAudit/build.gradle b/buildSrc/src/testKit/thirdPartyAudit/build.gradle index 725be970fd952..e3258ce310be4 100644 --- a/buildSrc/src/testKit/thirdPartyAudit/build.gradle +++ b/buildSrc/src/testKit/thirdPartyAudit/build.gradle @@ -2,8 +2,8 @@ import org.elasticsearch.gradle.precommit.ThirdPartyAuditTask plugins { id 'java' - //just to get build-tools - id 'elasticsearch.testclusters' + // bring in build-tools onto the classpath + id 'elasticsearch.global-build-info' apply false } repositories { diff --git a/buildSrc/version.properties b/buildSrc/version.properties index cbcbf00fc6d01..5cff6641ac590 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ elasticsearch = 8.0.0 -lucene = 8.1.0 +lucene = 8.2.0-snapshot-6413aae226 bundled_jdk = 12.0.1+12@69cfe15208a647278a19ef0990eea691 diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkAction.java index e6412099fee72..065c249a25ec6 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkAction.java @@ -18,20 +18,15 @@ */ package org.elasticsearch.plugin.noop.action.bulk; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.bulk.BulkResponse; -public class NoopBulkAction extends StreamableResponseActionType { +public class NoopBulkAction extends ActionType { public static final String NAME = "mock:data/write/bulk"; public static final NoopBulkAction INSTANCE = new NoopBulkAction(); private NoopBulkAction() { - super(NAME); - } - - @Override - public BulkResponse newResponse() { - return new BulkResponse(null, 0); + super(NAME, BulkResponse::new); } } diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java index 8805af367a80e..a8317fec83a1a 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java @@ -87,7 +87,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC private static class BulkRestBuilderListener extends RestBuilderListener { private final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, DocWriteRequest.OpType.UPDATE, - new UpdateResponse(new ShardId("mock", "", 1), "mock_type", "1", 1L, DocWriteResponse.Result.CREATED)); + new UpdateResponse(new ShardId("mock", "", 1), "mock_type", "1", 0L, 1L, 1L, DocWriteResponse.Result.CREATED)); private final RestRequest request; diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java 
b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java index 671e6b338e2df..e7f22e2f5983f 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java @@ -32,15 +32,13 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; -import java.util.function.Supplier; - public class TransportNoopBulkAction extends HandledTransportAction { private static final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, DocWriteRequest.OpType.UPDATE, - new UpdateResponse(new ShardId("mock", "", 1), "mock_type", "1", 1L, DocWriteResponse.Result.CREATED)); + new UpdateResponse(new ShardId("mock", "", 1), "mock_type", "1", 0L, 1L, 1L, DocWriteResponse.Result.CREATED)); @Inject public TransportNoopBulkAction(TransportService transportService, ActionFilters actionFilters) { - super(NoopBulkAction.NAME, transportService, actionFilters, (Supplier) BulkRequest::new); + super(NoopBulkAction.NAME, transportService, actionFilters, BulkRequest::new); } @Override diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchAction.java index fb83bda148b11..001c092eccd8a 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchAction.java @@ -20,18 +20,12 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.common.io.stream.Writeable; public class NoopSearchAction extends ActionType { public static final NoopSearchAction INSTANCE = new NoopSearchAction(); public static final String NAME = "mock:data/read/search"; private NoopSearchAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return SearchResponse::new; + super(NAME, SearchResponse::new); } } diff --git a/client/rest-high-level/build.gradle b/client/rest-high-level/build.gradle index 0e05eb567687a..9a485e96f9cc2 100644 --- a/client/rest-high-level/build.gradle +++ b/client/rest-high-level/build.gradle @@ -122,7 +122,7 @@ integTest.runner { } testClusters.integTest { - distribution = "DEFAULT" + testDistribution = 'DEFAULT' systemProperty 'es.scripting.update.ctx_in_params', 'false' setting 'reindex.remote.whitelist', '[ "[::1]:*", "127.0.0.1:*" ]' setting 'xpack.license.self_generated.type', 'trial' diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameRequestConverters.java index 18dfc2305575d..f45f2a6c8ec40 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameRequestConverters.java @@ -37,7 +37,9 @@ import static org.elasticsearch.client.RequestConverters.REQUEST_BODY_CONTENT_TYPE; import static org.elasticsearch.client.RequestConverters.createEntity; +import static 
org.elasticsearch.client.dataframe.DeleteDataFrameTransformRequest.FORCE; import static org.elasticsearch.client.dataframe.GetDataFrameTransformRequest.ALLOW_NO_MATCH; +import static org.elasticsearch.client.dataframe.PutDataFrameTransformRequest.DEFER_VALIDATION; final class DataFrameRequestConverters { @@ -50,6 +52,9 @@ static Request putDataFrameTransform(PutDataFrameTransformRequest putRequest) th .build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); request.setEntity(createEntity(putRequest, REQUEST_BODY_CONTENT_TYPE)); + if (putRequest.getDeferValidation() != null) { + request.addParameter(DEFER_VALIDATION, Boolean.toString(putRequest.getDeferValidation())); + } return request; } @@ -71,12 +76,16 @@ static Request getDataFrameTransform(GetDataFrameTransformRequest getRequest) { return request; } - static Request deleteDataFrameTransform(DeleteDataFrameTransformRequest request) { + static Request deleteDataFrameTransform(DeleteDataFrameTransformRequest deleteRequest) { String endpoint = new RequestConverters.EndpointBuilder() .addPathPartAsIs("_data_frame", "transforms") - .addPathPart(request.getId()) + .addPathPart(deleteRequest.getId()) .build(); - return new Request(HttpDelete.METHOD_NAME, endpoint); + Request request = new Request(HttpDelete.METHOD_NAME, endpoint); + if (deleteRequest.getForce() != null) { + request.addParameter(FORCE, Boolean.toString(deleteRequest.getForce())); + } + return request; } static Request startDataFrameTransform(StartDataFrameTransformRequest startRequest) { diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleClient.java index 224c6b2caf434..60a95e510a998 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleClient.java @@ -34,6 +34,12 @@ import org.elasticsearch.client.indexlifecycle.RetryLifecyclePolicyRequest; import org.elasticsearch.client.indexlifecycle.StartILMRequest; import org.elasticsearch.client.indexlifecycle.StopILMRequest; +import org.elasticsearch.client.snapshotlifecycle.DeleteSnapshotLifecyclePolicyRequest; +import org.elasticsearch.client.snapshotlifecycle.ExecuteSnapshotLifecyclePolicyRequest; +import org.elasticsearch.client.snapshotlifecycle.ExecuteSnapshotLifecyclePolicyResponse; +import org.elasticsearch.client.snapshotlifecycle.GetSnapshotLifecyclePolicyRequest; +import org.elasticsearch.client.snapshotlifecycle.GetSnapshotLifecyclePolicyResponse; +import org.elasticsearch.client.snapshotlifecycle.PutSnapshotLifecyclePolicyRequest; import java.io.IOException; @@ -300,4 +306,144 @@ public void retryLifecyclePolicyAsync(RetryLifecyclePolicyRequest request, Reque restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::retryLifecycle, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } + + /** + * Retrieve one or more snapshot lifecycle policy definitions. + * See
+     * <pre>
+     *  https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current/
+     *  java-rest-high-ilm-slm-get-snapshot-lifecycle-policy.html
+     * </pre>
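+     * <p>
+     * A minimal usage sketch (the policy id and the {@code client} variable, an
+     * existing {@link RestHighLevelClient}, are assumptions for illustration):
+     * <pre>{@code
+     * GetSnapshotLifecyclePolicyRequest req =
+     *     new GetSnapshotLifecyclePolicyRequest("nightly-snapshots");
+     * GetSnapshotLifecyclePolicyResponse resp =
+     *     client.indexLifecycle().getSnapshotLifecyclePolicy(req, RequestOptions.DEFAULT);
+     * }</pre>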
+ * for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public GetSnapshotLifecyclePolicyResponse getSnapshotLifecyclePolicy(GetSnapshotLifecyclePolicyRequest request, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::getSnapshotLifecyclePolicy, + options, GetSnapshotLifecyclePolicyResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously retrieve one or more snapshot lifecycle policy definitions. + * See
+     * <pre>
+     *  https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current/
+     *  java-rest-high-ilm-slm-get-snapshot-lifecycle-policy.html
+     * </pre>
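+     * <p>
+     * Several policy ids can be fetched in one call; the request converter sends
+     * them as a single comma-separated path part. A sketch (ids are hypothetical,
+     * and a varargs constructor is assumed):
+     * <pre>{@code
+     * new GetSnapshotLifecyclePolicyRequest("nightly-snapshots", "weekly-snapshots");
+     * }</pre>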
+ * for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void getSnapshotLifecyclePolicyAsync(GetSnapshotLifecyclePolicyRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::getSnapshotLifecyclePolicy, + options, GetSnapshotLifecyclePolicyResponse::fromXContent, listener, emptySet()); + } + + /** + * Create or modify a snapshot lifecycle definition. + * See
+     * <pre>
+     *  https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current/
+     *  java-rest-high-ilm-slm-put-snapshot-lifecycle-policy.html
+     * </pre>
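+     * <p>
+     * A sketch, assuming {@code policy} is an already built
+     * {@code SnapshotLifecyclePolicy} and {@code client} an existing
+     * {@link RestHighLevelClient}:
+     * <pre>{@code
+     * PutSnapshotLifecyclePolicyRequest req = new PutSnapshotLifecyclePolicyRequest(policy);
+     * AcknowledgedResponse ack =
+     *     client.indexLifecycle().putSnapshotLifecyclePolicy(req, RequestOptions.DEFAULT);
+     * boolean acknowledged = ack.isAcknowledged();
+     * }</pre>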
+ * for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public AcknowledgedResponse putSnapshotLifecyclePolicy(PutSnapshotLifecyclePolicyRequest request, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::putSnapshotLifecyclePolicy, + options, AcknowledgedResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously create or modify a snapshot lifecycle definition. + * See
+     * <pre>
+     *  https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current/
+     *  java-rest-high-ilm-slm-put-snapshot-lifecycle-policy.html
+     * </pre>
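+     * <p>
+     * The master node timeout and request timeout read by the request converter can
+     * be tuned before sending; the setter names below assume the client's usual
+     * {@code TimedRequest} pattern:
+     * <pre>{@code
+     * request.setMasterTimeout(TimeValue.timeValueSeconds(30));
+     * request.setTimeout(TimeValue.timeValueSeconds(30));
+     * }</pre>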
+ * for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void putSnapshotLifecyclePolicyAsync(PutSnapshotLifecyclePolicyRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::putSnapshotLifecyclePolicy, + options, AcknowledgedResponse::fromXContent, listener, emptySet()); + } + + /** + * Delete a snapshot lifecycle definition. + * See
+     * <pre>
+     *  https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current/
+     *  java-rest-high-ilm-slm-delete-snapshot-lifecycle-policy.html
+     * </pre>
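+     * <p>
+     * A sketch (policy id is hypothetical; {@code client} as above):
+     * <pre>{@code
+     * DeleteSnapshotLifecyclePolicyRequest req =
+     *     new DeleteSnapshotLifecyclePolicyRequest("nightly-snapshots");
+     * AcknowledgedResponse ack =
+     *     client.indexLifecycle().deleteSnapshotLifecyclePolicy(req, RequestOptions.DEFAULT);
+     * }</pre>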
+ * for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public AcknowledgedResponse deleteSnapshotLifecyclePolicy(DeleteSnapshotLifecyclePolicyRequest request, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::deleteSnapshotLifecyclePolicy, + options, AcknowledgedResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously delete a snapshot lifecycle definition. + * See
+     * <pre>
+     *  https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current/
+     *  java-rest-high-ilm-slm-delete-snapshot-lifecycle-policy.html
+     * </pre>
+ * for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void deleteSnapshotLifecyclePolicyAsync(DeleteSnapshotLifecyclePolicyRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::deleteSnapshotLifecyclePolicy, + options, AcknowledgedResponse::fromXContent, listener, emptySet()); + } + + /** + * Execute a snapshot lifecycle definition. + * See
+     * <pre>
+     *  https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current/
+     *  java-rest-high-ilm-slm-execute-snapshot-lifecycle-policy.html
+     * </pre>
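+     * <p>
+     * The request converter issues {@code PUT _slm/policy/{id}/_execute}. A sketch
+     * (policy id is hypothetical):
+     * <pre>{@code
+     * ExecuteSnapshotLifecyclePolicyRequest req =
+     *     new ExecuteSnapshotLifecyclePolicyRequest("nightly-snapshots");
+     * ExecuteSnapshotLifecyclePolicyResponse resp =
+     *     client.indexLifecycle().executeSnapshotLifecyclePolicy(req, RequestOptions.DEFAULT);
+     * }</pre>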
+ * for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public ExecuteSnapshotLifecyclePolicyResponse executeSnapshotLifecyclePolicy(ExecuteSnapshotLifecyclePolicyRequest request, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::executeSnapshotLifecyclePolicy, + options, ExecuteSnapshotLifecyclePolicyResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously execute a snapshot lifecycle definition. + * See
+     * <pre>
+     *  https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current/
+     *  java-rest-high-ilm-slm-execute-snapshot-lifecycle-policy.html
+     * </pre>
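+     * <p>
+     * A sketch of the asynchronous form ({@code req} as in the synchronous example;
+     * listener bodies elided):
+     * <pre>{@code
+     * client.indexLifecycle().executeSnapshotLifecyclePolicyAsync(req, RequestOptions.DEFAULT,
+     *     new ActionListener<ExecuteSnapshotLifecyclePolicyResponse>() {
+     *         public void onResponse(ExecuteSnapshotLifecyclePolicyResponse response) { }
+     *         public void onFailure(Exception e) { }
+     *     });
+     * }</pre>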
+ * for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void executeSnapshotLifecyclePolicyAsync(ExecuteSnapshotLifecyclePolicyRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::executeSnapshotLifecyclePolicy, + options, ExecuteSnapshotLifecyclePolicyResponse::fromXContent, listener, emptySet()); + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleRequestConverters.java index f39f2b36cebc0..f1d90adca1b6b 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleRequestConverters.java @@ -32,6 +32,10 @@ import org.elasticsearch.client.indexlifecycle.RetryLifecyclePolicyRequest; import org.elasticsearch.client.indexlifecycle.StartILMRequest; import org.elasticsearch.client.indexlifecycle.StopILMRequest; +import org.elasticsearch.client.snapshotlifecycle.DeleteSnapshotLifecyclePolicyRequest; +import org.elasticsearch.client.snapshotlifecycle.ExecuteSnapshotLifecyclePolicyRequest; +import org.elasticsearch.client.snapshotlifecycle.GetSnapshotLifecyclePolicyRequest; +import org.elasticsearch.client.snapshotlifecycle.PutSnapshotLifecyclePolicyRequest; import org.elasticsearch.common.Strings; import java.io.IOException; @@ -159,4 +163,56 @@ static Request retryLifecycle(RetryLifecyclePolicyRequest retryLifecyclePolicyRe request.addParameters(params.asMap()); return request; } + + static Request getSnapshotLifecyclePolicy(GetSnapshotLifecyclePolicyRequest getSnapshotLifecyclePolicyRequest) { + String endpoint = new RequestConverters.EndpointBuilder().addPathPartAsIs("_slm/policy") + .addCommaSeparatedPathParts(getSnapshotLifecyclePolicyRequest.getPolicyIds()).build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + RequestConverters.Params params = new RequestConverters.Params(); + params.withMasterTimeout(getSnapshotLifecyclePolicyRequest.masterNodeTimeout()); + params.withTimeout(getSnapshotLifecyclePolicyRequest.timeout()); + request.addParameters(params.asMap()); + return request; + } + + static Request putSnapshotLifecyclePolicy(PutSnapshotLifecyclePolicyRequest putSnapshotLifecyclePolicyRequest) throws IOException { + String endpoint = new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_slm/policy") + .addPathPartAsIs(putSnapshotLifecyclePolicyRequest.getPolicy().getId()) + .build(); + Request request = new Request(HttpPut.METHOD_NAME, endpoint); + RequestConverters.Params params = new RequestConverters.Params(); + params.withMasterTimeout(putSnapshotLifecyclePolicyRequest.masterNodeTimeout()); + params.withTimeout(putSnapshotLifecyclePolicyRequest.timeout()); + request.addParameters(params.asMap()); + request.setEntity(RequestConverters.createEntity(putSnapshotLifecyclePolicyRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request deleteSnapshotLifecyclePolicy(DeleteSnapshotLifecyclePolicyRequest deleteSnapshotLifecyclePolicyRequest) { + Request request = new Request(HttpDelete.METHOD_NAME, + new RequestConverters.EndpointBuilder() + 
.addPathPartAsIs("_slm/policy") + .addPathPartAsIs(deleteSnapshotLifecyclePolicyRequest.getPolicyId()) + .build()); + RequestConverters.Params params = new RequestConverters.Params(); + params.withMasterTimeout(deleteSnapshotLifecyclePolicyRequest.masterNodeTimeout()); + params.withTimeout(deleteSnapshotLifecyclePolicyRequest.timeout()); + request.addParameters(params.asMap()); + return request; + } + + static Request executeSnapshotLifecyclePolicy(ExecuteSnapshotLifecyclePolicyRequest executeSnapshotLifecyclePolicyRequest) { + Request request = new Request(HttpPut.METHOD_NAME, + new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_slm/policy") + .addPathPartAsIs(executeSnapshotLifecyclePolicyRequest.getPolicyId()) + .addPathPartAsIs("_execute") + .build()); + RequestConverters.Params params = new RequestConverters.Params(); + params.withMasterTimeout(executeSnapshotLifecyclePolicyRequest.masterNodeTimeout()); + params.withTimeout(executeSnapshotLifecyclePolicyRequest.timeout()); + request.addParameters(params.asMap()); + return request; + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java index 584bdad745026..42791b719c4e2 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java @@ -569,7 +569,7 @@ public void flushAsync(FlushRequest flushRequest, RequestOptions options, Action /** * Initiate a synced flush manually using the synced flush API. - * See + * See * Synced flush API on elastic.co * @param syncedFlushRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized @@ -583,7 +583,7 @@ public SyncedFlushResponse flushSynced(SyncedFlushRequest syncedFlushRequest, Re /** * Asynchronously initiate a synced flush manually using the synced flush API. - * See + * See * Synced flush API on elastic.co * @param syncedFlushRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index 35abf5e85180f..4e1a571ec6560 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -1010,8 +1010,9 @@ public final void msearchAsync(MultiSearchRequest searchRequest, RequestOptions /** * Executes a search using the Search Scroll API. - * See Search Scroll - * API on elastic.co + * See Search + * Scroll API on elastic.co * @param searchScrollRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response @@ -1024,8 +1025,9 @@ public final SearchResponse searchScroll(SearchScrollRequest searchScrollRequest /** * Executes a search using the Search Scroll API. - * See Search Scroll - * API on elastic.co + * See Search + * Scroll API on elastic.co * @param searchScrollRequest the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response @@ -1037,8 +1039,9 @@ public final SearchResponse scroll(SearchScrollRequest searchScrollRequest, Requ /** * Asynchronously executes a search using the Search Scroll API. - * See Search Scroll - * API on elastic.co + * See Search + * Scroll API on elastic.co * @param searchScrollRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion @@ -1052,8 +1055,9 @@ public final void searchScrollAsync(SearchScrollRequest searchScrollRequest, Req /** * Asynchronously executes a search using the Search Scroll API. - * See Search Scroll - * API on elastic.co + * See Search + * Scroll API on elastic.co * @param searchScrollRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion @@ -1066,7 +1070,8 @@ public final void scrollAsync(SearchScrollRequest searchScrollRequest, RequestOp /** * Clears one or more scroll ids using the Clear Scroll API. - * See + * See * Clear Scroll API on elastic.co * @param clearScrollRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized @@ -1079,7 +1084,8 @@ public final ClearScrollResponse clearScroll(ClearScrollRequest clearScrollReque /** * Asynchronously clears one or more scroll ids using the Clear Scroll API. - * See + * See * Clear Scroll API on elastic.co * @param clearScrollRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/DeleteDataFrameTransformRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/DeleteDataFrameTransformRequest.java index bf893e4ea4b89..18323a7b2e411 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/DeleteDataFrameTransformRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/DeleteDataFrameTransformRequest.java @@ -31,7 +31,10 @@ */ public class DeleteDataFrameTransformRequest implements Validatable { + public static final String FORCE = "force"; + private final String id; + private Boolean force; public DeleteDataFrameTransformRequest(String id) { this.id = id; @@ -41,6 +44,14 @@ public String getId() { return id; } + public Boolean getForce() { + return force; + } + + public void setForce(boolean force) { + this.force = force; + } + @Override public Optional validate() { if (id == null) { @@ -54,7 +65,7 @@ public Optional validate() { @Override public int hashCode() { - return Objects.hash(id); + return Objects.hash(id, force); } @Override @@ -67,6 +78,6 @@ public boolean equals(Object obj) { return false; } DeleteDataFrameTransformRequest other = (DeleteDataFrameTransformRequest) obj; - return Objects.equals(id, other.id); + return Objects.equals(id, other.id) && Objects.equals(force, other.force); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/PutDataFrameTransformRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/PutDataFrameTransformRequest.java index 8ac80587fed34..814414f04eece 100644 --- 
a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/PutDataFrameTransformRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/PutDataFrameTransformRequest.java @@ -31,7 +31,9 @@ public class PutDataFrameTransformRequest implements ToXContentObject, Validatable { + public static final String DEFER_VALIDATION = "defer_validation"; private final DataFrameTransformConfig config; + private Boolean deferValidation; public PutDataFrameTransformRequest(DataFrameTransformConfig config) { this.config = config; @@ -41,6 +43,19 @@ public DataFrameTransformConfig getConfig() { return config; } + public Boolean getDeferValidation() { + return deferValidation; + } + + /** + * Indicates if deferrable validations should be skipped until the transform starts + * + * @param deferValidation {@code true} will cause validations to be deferred + */ + public void setDeferValidation(boolean deferValidation) { + this.deferValidation = deferValidation; + } + @Override public Optional validate() { ValidationException validationException = new ValidationException(); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameIndexerPosition.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameIndexerPosition.java new file mode 100644 index 0000000000000..86a2527ffdd25 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameIndexerPosition.java @@ -0,0 +1,99 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.dataframe.transforms; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * Holds state of the cursors: + * + * indexer_position: the position of the indexer querying the source + * bucket_position: the position used for identifying changes + */ +public class DataFrameIndexerPosition { + public static final ParseField INDEXER_POSITION = new ParseField("indexer_position"); + public static final ParseField BUCKET_POSITION = new ParseField("bucket_position"); + + private final Map indexerPosition; + private final Map bucketPosition; + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "data_frame_indexer_position", + true, + args -> new DataFrameIndexerPosition((Map) args[0],(Map) args[1])); + + static { + PARSER.declareField(optionalConstructorArg(), XContentParser::mapOrdered, INDEXER_POSITION, ValueType.OBJECT); + PARSER.declareField(optionalConstructorArg(), XContentParser::mapOrdered, BUCKET_POSITION, ValueType.OBJECT); + } + + public DataFrameIndexerPosition(Map indexerPosition, Map bucketPosition) { + this.indexerPosition = indexerPosition == null ? null : Collections.unmodifiableMap(indexerPosition); + this.bucketPosition = bucketPosition == null ? null : Collections.unmodifiableMap(bucketPosition); + } + + public Map getIndexerPosition() { + return indexerPosition; + } + + public Map getBucketsPosition() { + return bucketPosition; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + DataFrameIndexerPosition that = (DataFrameIndexerPosition) other; + + return Objects.equals(this.indexerPosition, that.indexerPosition) && + Objects.equals(this.bucketPosition, that.bucketPosition); + } + + @Override + public int hashCode() { + return Objects.hash(indexerPosition, bucketPosition); + } + + public static DataFrameIndexerPosition fromXContent(XContentParser parser) { + try { + return PARSER.parse(parser, null); + } catch (IOException e) { + throw new RuntimeException(e); + } + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfig.java index 355e3ad9bbc0f..2810d6a8cfad2 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfig.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; @@ -44,6 +45,7 @@ public class DataFrameTransformConfig implements ToXContentObject { public 
static final ParseField ID = new ParseField("id"); public static final ParseField SOURCE = new ParseField("source"); public static final ParseField DEST = new ParseField("dest"); + public static final ParseField FREQUENCY = new ParseField("frequency"); public static final ParseField DESCRIPTION = new ParseField("description"); public static final ParseField SYNC = new ParseField("sync"); public static final ParseField VERSION = new ParseField("version"); @@ -54,6 +56,7 @@ public class DataFrameTransformConfig implements ToXContentObject { private final String id; private final SourceConfig source; private final DestConfig dest; + private final TimeValue frequency; private final SyncConfig syncConfig; private final PivotConfig pivotConfig; private final String description; @@ -66,14 +69,16 @@ public class DataFrameTransformConfig implements ToXContentObject { String id = (String) args[0]; SourceConfig source = (SourceConfig) args[1]; DestConfig dest = (DestConfig) args[2]; - SyncConfig syncConfig = (SyncConfig) args[3]; - PivotConfig pivotConfig = (PivotConfig) args[4]; - String description = (String)args[5]; - Instant createTime = (Instant)args[6]; - String transformVersion = (String)args[7]; + TimeValue frequency = (TimeValue) args[3]; + SyncConfig syncConfig = (SyncConfig) args[4]; + PivotConfig pivotConfig = (PivotConfig) args[5]; + String description = (String)args[6]; + Instant createTime = (Instant)args[7]; + String transformVersion = (String)args[8]; return new DataFrameTransformConfig(id, source, dest, + frequency, syncConfig, pivotConfig, description, @@ -85,6 +90,8 @@ public class DataFrameTransformConfig implements ToXContentObject { PARSER.declareString(constructorArg(), ID); PARSER.declareObject(constructorArg(), (p, c) -> SourceConfig.PARSER.apply(p, null), SOURCE); PARSER.declareObject(constructorArg(), (p, c) -> DestConfig.PARSER.apply(p, null), DEST); + PARSER.declareField(optionalConstructorArg(), p -> TimeValue.parseTimeValue(p.text(), FREQUENCY.getPreferredName()), + FREQUENCY, ObjectParser.ValueType.STRING); PARSER.declareObject(optionalConstructorArg(), (p, c) -> parseSyncConfig(p), SYNC); PARSER.declareObject(optionalConstructorArg(), (p, c) -> PivotConfig.fromXContent(p), PIVOT_TRANSFORM); PARSER.declareString(optionalConstructorArg(), DESCRIPTION); @@ -118,12 +125,13 @@ public static DataFrameTransformConfig fromXContent(final XContentParser parser) * @return A DataFrameTransformConfig to preview, NOTE it will have a {@code null} id, destination and index. 
*/ public static DataFrameTransformConfig forPreview(final SourceConfig source, final PivotConfig pivotConfig) { - return new DataFrameTransformConfig(null, source, null, null, pivotConfig, null, null, null); + return new DataFrameTransformConfig(null, source, null, null, null, pivotConfig, null, null, null); } DataFrameTransformConfig(final String id, final SourceConfig source, final DestConfig dest, + final TimeValue frequency, final SyncConfig syncConfig, final PivotConfig pivotConfig, final String description, @@ -132,6 +140,7 @@ public static DataFrameTransformConfig forPreview(final SourceConfig source, fin this.id = id; this.source = source; this.dest = dest; + this.frequency = frequency; this.syncConfig = syncConfig; this.pivotConfig = pivotConfig; this.description = description; @@ -151,6 +160,10 @@ public DestConfig getDestination() { return dest; } + public TimeValue getFrequency() { + return frequency; + } + public SyncConfig getSyncConfig() { return syncConfig; } @@ -184,6 +197,9 @@ public XContentBuilder toXContent(final XContentBuilder builder, final Params pa if (dest != null) { builder.field(DEST.getPreferredName(), dest); } + if (frequency != null) { + builder.field(FREQUENCY.getPreferredName(), frequency.getStringRep()); + } if (syncConfig != null) { builder.startObject(SYNC.getPreferredName()); builder.field(syncConfig.getName(), syncConfig); @@ -220,6 +236,7 @@ public boolean equals(Object other) { return Objects.equals(this.id, that.id) && Objects.equals(this.source, that.source) && Objects.equals(this.dest, that.dest) + && Objects.equals(this.frequency, that.frequency) && Objects.equals(this.description, that.description) && Objects.equals(this.syncConfig, that.syncConfig) && Objects.equals(this.transformVersion, that.transformVersion) @@ -229,7 +246,7 @@ public boolean equals(Object other) { @Override public int hashCode() { - return Objects.hash(id, source, dest, syncConfig, pivotConfig, description); + return Objects.hash(id, source, dest, frequency, syncConfig, pivotConfig, description); } @Override @@ -246,6 +263,7 @@ public static class Builder { private String id; private SourceConfig source; private DestConfig dest; + private TimeValue frequency; private SyncConfig syncConfig; private PivotConfig pivotConfig; private String description; @@ -265,6 +283,11 @@ public Builder setDest(DestConfig dest) { return this; } + public Builder setFrequency(TimeValue frequency) { + this.frequency = frequency; + return this; + } + public Builder setSyncConfig(SyncConfig syncConfig) { this.syncConfig = syncConfig; return this; @@ -281,7 +304,7 @@ public Builder setDescription(String description) { } public DataFrameTransformConfig build() { - return new DataFrameTransformConfig(id, source, dest, syncConfig, pivotConfig, description, null, null); + return new DataFrameTransformConfig(id, source, dest, frequency, syncConfig, pivotConfig, description, null, null); } } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformState.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformState.java index 186c67bf42ce2..65216827f4837 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformState.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformState.java @@ -27,8 +27,6 @@ import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; 
-import java.util.Collections; -import java.util.LinkedHashMap; import java.util.Map; import java.util.Objects; @@ -39,7 +37,10 @@ public class DataFrameTransformState { private static final ParseField INDEXER_STATE = new ParseField("indexer_state"); private static final ParseField TASK_STATE = new ParseField("task_state"); + + // 7.3 BWC: current_position only exists in 7.2. In 7.3+ it is replaced by position. private static final ParseField CURRENT_POSITION = new ParseField("current_position"); + private static final ParseField POSITION = new ParseField("position"); private static final ParseField CHECKPOINT = new ParseField("checkpoint"); private static final ParseField REASON = new ParseField("reason"); private static final ParseField PROGRESS = new ParseField("progress"); @@ -48,18 +49,31 @@ public class DataFrameTransformState { @SuppressWarnings("unchecked") public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("data_frame_transform_state", true, - args -> new DataFrameTransformState((DataFrameTransformTaskState) args[0], - (IndexerState) args[1], - (Map) args[2], - (long) args[3], - (String) args[4], - (DataFrameTransformProgress) args[5], - (NodeAttributes) args[6])); + args -> { + DataFrameTransformTaskState taskState = (DataFrameTransformTaskState) args[0]; + IndexerState indexerState = (IndexerState) args[1]; + Map bwcCurrentPosition = (Map) args[2]; + DataFrameIndexerPosition dataFrameIndexerPosition = (DataFrameIndexerPosition) args[3]; + + // BWC handling, translate current_position to position iff position isn't set + if (bwcCurrentPosition != null && dataFrameIndexerPosition == null) { + dataFrameIndexerPosition = new DataFrameIndexerPosition(bwcCurrentPosition, null); + } + + long checkpoint = (long) args[4]; + String reason = (String) args[5]; + DataFrameTransformProgress progress = (DataFrameTransformProgress) args[6]; + NodeAttributes node = (NodeAttributes) args[7]; + + return new DataFrameTransformState(taskState, indexerState, dataFrameIndexerPosition, checkpoint, reason, progress, + node); + }); static { PARSER.declareField(constructorArg(), p -> DataFrameTransformTaskState.fromString(p.text()), TASK_STATE, ValueType.STRING); PARSER.declareField(constructorArg(), p -> IndexerState.fromString(p.text()), INDEXER_STATE, ValueType.STRING); PARSER.declareField(optionalConstructorArg(), (p, c) -> p.mapOrdered(), CURRENT_POSITION, ValueType.OBJECT); + PARSER.declareField(optionalConstructorArg(), DataFrameIndexerPosition::fromXContent, POSITION, ValueType.OBJECT); PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), CHECKPOINT); PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), REASON); PARSER.declareField(optionalConstructorArg(), DataFrameTransformProgress::fromXContent, PROGRESS, ValueType.OBJECT); @@ -73,21 +87,21 @@ public static DataFrameTransformState fromXContent(XContentParser parser) throws private final DataFrameTransformTaskState taskState; private final IndexerState indexerState; private final long checkpoint; - private final Map currentPosition; + private final DataFrameIndexerPosition position; private final String reason; private final DataFrameTransformProgress progress; private final NodeAttributes node; public DataFrameTransformState(DataFrameTransformTaskState taskState, IndexerState indexerState, - @Nullable Map position, + @Nullable DataFrameIndexerPosition position, long checkpoint, @Nullable String reason, @Nullable DataFrameTransformProgress progress, @Nullable NodeAttributes 
node) { this.taskState = taskState; this.indexerState = indexerState; - this.currentPosition = position == null ? null : Collections.unmodifiableMap(new LinkedHashMap<>(position)); + this.position = position; this.checkpoint = checkpoint; this.reason = reason; this.progress = progress; @@ -103,8 +117,8 @@ public DataFrameTransformTaskState getTaskState() { } @Nullable - public Map getPosition() { - return currentPosition; + public DataFrameIndexerPosition getPosition() { + return position; } public long getCheckpoint() { @@ -140,7 +154,7 @@ public boolean equals(Object other) { return Objects.equals(this.taskState, that.taskState) && Objects.equals(this.indexerState, that.indexerState) && - Objects.equals(this.currentPosition, that.currentPosition) && + Objects.equals(this.position, that.position) && Objects.equals(this.progress, that.progress) && this.checkpoint == that.checkpoint && Objects.equals(this.node, that.node) && @@ -149,7 +163,7 @@ public boolean equals(Object other) { @Override public int hashCode() { - return Objects.hash(taskState, indexerState, currentPosition, checkpoint, reason, progress, node); + return Objects.hash(taskState, indexerState, position, checkpoint, reason, progress, node); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/IndexLifecycleExplainResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/IndexLifecycleExplainResponse.java index 772dfbc0c5c13..6b4066af6f6aa 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/IndexLifecycleExplainResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/IndexLifecycleExplainResponse.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -52,6 +53,7 @@ public class IndexLifecycleExplainResponse implements ToXContentObject { private static final ParseField STEP_TIME_FIELD = new ParseField("step_time"); private static final ParseField STEP_INFO_FIELD = new ParseField("step_info"); private static final ParseField PHASE_EXECUTION_INFO = new ParseField("phase_execution"); + private static final ParseField AGE_FIELD = new ParseField("age"); public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "index_lifecycle_explain_response", true, @@ -205,6 +207,14 @@ public PhaseExecutionInfo getPhaseExecutionInfo() { return phaseExecutionInfo; } + public TimeValue getAge() { + if (lifecycleDate == null) { + return TimeValue.MINUS_ONE; + } else { + return TimeValue.timeValueMillis(System.currentTimeMillis() - lifecycleDate); + } + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -214,6 +224,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(POLICY_NAME_FIELD.getPreferredName(), policyName); if (lifecycleDate != null) { builder.timeField(LIFECYCLE_DATE_MILLIS_FIELD.getPreferredName(), LIFECYCLE_DATE_FIELD.getPreferredName(), lifecycleDate); + builder.field(AGE_FIELD.getPreferredName(), getAge().toHumanReadableString(2)); } if (phase != null) { 
builder.field(PHASE_FIELD.getPreferredName(), phase); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedTimingStats.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedTimingStats.java index 17617f556cf7b..9f9215e5046fe 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedTimingStats.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedTimingStats.java @@ -36,7 +36,9 @@ public class DatafeedTimingStats implements ToXContentObject { public static final ParseField JOB_ID = new ParseField("job_id"); public static final ParseField SEARCH_COUNT = new ParseField("search_count"); + public static final ParseField BUCKET_COUNT = new ParseField("bucket_count"); public static final ParseField TOTAL_SEARCH_TIME_MS = new ParseField("total_search_time_ms"); + public static final ParseField AVG_SEARCH_TIME_PER_BUCKET_MS = new ParseField("average_search_time_per_bucket_ms"); public static final ParseField TYPE = new ParseField("datafeed_timing_stats"); @@ -50,23 +52,37 @@ private static ConstructingObjectParser createParser( args -> { String jobId = (String) args[0]; Long searchCount = (Long) args[1]; - Double totalSearchTimeMs = (Double) args[2]; - return new DatafeedTimingStats(jobId, getOrDefault(searchCount, 0L), getOrDefault(totalSearchTimeMs, 0.0)); + Long bucketCount = (Long) args[2]; + Double totalSearchTimeMs = (Double) args[3]; + Double avgSearchTimePerBucketMs = (Double) args[4]; + return new DatafeedTimingStats( + jobId, + getOrDefault(searchCount, 0L), + getOrDefault(bucketCount, 0L), + getOrDefault(totalSearchTimeMs, 0.0), + avgSearchTimePerBucketMs); }); parser.declareString(constructorArg(), JOB_ID); parser.declareLong(optionalConstructorArg(), SEARCH_COUNT); + parser.declareLong(optionalConstructorArg(), BUCKET_COUNT); parser.declareDouble(optionalConstructorArg(), TOTAL_SEARCH_TIME_MS); + parser.declareDouble(optionalConstructorArg(), AVG_SEARCH_TIME_PER_BUCKET_MS); return parser; } private final String jobId; private long searchCount; + private long bucketCount; private double totalSearchTimeMs; + private Double avgSearchTimePerBucketMs; - public DatafeedTimingStats(String jobId, long searchCount, double totalSearchTimeMs) { + public DatafeedTimingStats( + String jobId, long searchCount, long bucketCount, double totalSearchTimeMs, @Nullable Double avgSearchTimePerBucketMs) { this.jobId = Objects.requireNonNull(jobId); this.searchCount = searchCount; + this.bucketCount = bucketCount; this.totalSearchTimeMs = totalSearchTimeMs; + this.avgSearchTimePerBucketMs = avgSearchTimePerBucketMs; } public String getJobId() { @@ -77,16 +93,28 @@ public long getSearchCount() { return searchCount; } + public long getBucketCount() { + return bucketCount; + } + public double getTotalSearchTimeMs() { return totalSearchTimeMs; } + public Double getAvgSearchTimePerBucketMs() { + return avgSearchTimePerBucketMs; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { builder.startObject(); builder.field(JOB_ID.getPreferredName(), jobId); builder.field(SEARCH_COUNT.getPreferredName(), searchCount); + builder.field(BUCKET_COUNT.getPreferredName(), bucketCount); builder.field(TOTAL_SEARCH_TIME_MS.getPreferredName(), totalSearchTimeMs); + if (avgSearchTimePerBucketMs != null) { + builder.field(AVG_SEARCH_TIME_PER_BUCKET_MS.getPreferredName(), avgSearchTimePerBucketMs); + } 
builder.endObject(); return builder; } @@ -103,12 +131,14 @@ public boolean equals(Object obj) { DatafeedTimingStats other = (DatafeedTimingStats) obj; return Objects.equals(this.jobId, other.jobId) && this.searchCount == other.searchCount - && this.totalSearchTimeMs == other.totalSearchTimeMs; + && this.bucketCount == other.bucketCount + && this.totalSearchTimeMs == other.totalSearchTimeMs + && Objects.equals(this.avgSearchTimePerBucketMs, other.avgSearchTimePerBucketMs); } @Override public int hashCode() { - return Objects.hash(jobId, searchCount, totalSearchTimeMs); + return Objects.hash(jobId, searchCount, bucketCount, totalSearchTimeMs, avgSearchTimePerBucketMs); } @Override diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/evaluation/MlEvaluationNamedXContentProvider.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/evaluation/MlEvaluationNamedXContentProvider.java index 764ff41de86e0..a28c498b1d5af 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/evaluation/MlEvaluationNamedXContentProvider.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/evaluation/MlEvaluationNamedXContentProvider.java @@ -18,6 +18,9 @@ */ package org.elasticsearch.client.ml.dataframe.evaluation; +import org.elasticsearch.client.ml.dataframe.evaluation.regression.MeanSquaredErrorMetric; +import org.elasticsearch.client.ml.dataframe.evaluation.regression.RSquaredMetric; +import org.elasticsearch.client.ml.dataframe.evaluation.regression.Regression; import org.elasticsearch.client.ml.dataframe.evaluation.softclassification.BinarySoftClassification; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -38,12 +41,17 @@ public List getNamedXContentParsers() { // Evaluations new NamedXContentRegistry.Entry( Evaluation.class, new ParseField(BinarySoftClassification.NAME), BinarySoftClassification::fromXContent), + new NamedXContentRegistry.Entry(Evaluation.class, new ParseField(Regression.NAME), Regression::fromXContent), // Evaluation metrics new NamedXContentRegistry.Entry(EvaluationMetric.class, new ParseField(AucRocMetric.NAME), AucRocMetric::fromXContent), new NamedXContentRegistry.Entry(EvaluationMetric.class, new ParseField(PrecisionMetric.NAME), PrecisionMetric::fromXContent), new NamedXContentRegistry.Entry(EvaluationMetric.class, new ParseField(RecallMetric.NAME), RecallMetric::fromXContent), new NamedXContentRegistry.Entry( EvaluationMetric.class, new ParseField(ConfusionMatrixMetric.NAME), ConfusionMatrixMetric::fromXContent), + new NamedXContentRegistry.Entry( + EvaluationMetric.class, new ParseField(MeanSquaredErrorMetric.NAME), MeanSquaredErrorMetric::fromXContent), + new NamedXContentRegistry.Entry( + EvaluationMetric.class, new ParseField(RSquaredMetric.NAME), RSquaredMetric::fromXContent), // Evaluation metrics results new NamedXContentRegistry.Entry( EvaluationMetric.Result.class, new ParseField(AucRocMetric.NAME), AucRocMetric.Result::fromXContent), @@ -51,6 +59,10 @@ EvaluationMetric.Result.class, new ParseField(AucRocMetric.NAME), AucRocMetric.R EvaluationMetric.Result.class, new ParseField(PrecisionMetric.NAME), PrecisionMetric.Result::fromXContent), new NamedXContentRegistry.Entry( EvaluationMetric.Result.class, new ParseField(RecallMetric.NAME), RecallMetric.Result::fromXContent), + new NamedXContentRegistry.Entry( + EvaluationMetric.Result.class, new ParseField(RSquaredMetric.NAME), 
RSquaredMetric.Result::fromXContent), + new NamedXContentRegistry.Entry( + EvaluationMetric.Result.class, new ParseField(MeanSquaredErrorMetric.NAME), MeanSquaredErrorMetric.Result::fromXContent), new NamedXContentRegistry.Entry( EvaluationMetric.Result.class, new ParseField(ConfusionMatrixMetric.NAME), ConfusionMatrixMetric.Result::fromXContent)); } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/evaluation/regression/MeanSquaredErrorMetric.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/evaluation/regression/MeanSquaredErrorMetric.java new file mode 100644 index 0000000000000..5b961dacbcc52 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/evaluation/regression/MeanSquaredErrorMetric.java @@ -0,0 +1,129 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml.dataframe.evaluation.regression; + +import org.elasticsearch.client.ml.dataframe.evaluation.EvaluationMetric; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * Calculates the mean squared error between two known numerical fields. 
+ * + * equation: mse = 1/n * Σ(y - y´)^2 + */ +public class MeanSquaredErrorMetric implements EvaluationMetric { + + public static final String NAME = "mean_squared_error"; + + private static final ObjectParser PARSER = + new ObjectParser<>("mean_squared_error", true, MeanSquaredErrorMetric::new); + + public static MeanSquaredErrorMetric fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public MeanSquaredErrorMetric() { + + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + return true; + } + + @Override + public int hashCode() { + // create static hash code from name as there are currently no unique fields per class instance + return Objects.hashCode(NAME); + } + + @Override + public String getName() { + return NAME; + } + + public static class Result implements EvaluationMetric.Result { + + public static final ParseField ERROR = new ParseField("error"); + private final double error; + + public static Result fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("mean_squared_error_result", true, args -> new Result((double) args[0])); + + static { + PARSER.declareDouble(constructorArg(), ERROR); + } + + public Result(double error) { + this.error = error; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.field(ERROR.getPreferredName(), error); + builder.endObject(); + return builder; + } + + public double getError() { + return error; + } + + @Override + public String getMetricName() { + return NAME; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Result that = (Result) o; + return Objects.equals(that.error, this.error); + } + + @Override + public int hashCode() { + return Objects.hash(error); + } + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/evaluation/regression/RSquaredMetric.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/evaluation/regression/RSquaredMetric.java new file mode 100644 index 0000000000000..968489a30389f --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/evaluation/regression/RSquaredMetric.java @@ -0,0 +1,131 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+package org.elasticsearch.client.ml.dataframe.evaluation.regression;
+
+import org.elasticsearch.client.ml.dataframe.evaluation.EvaluationMetric;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ObjectParser;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.Objects;
+
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
+
+/**
+ * Calculates R-Squared between two known numerical fields.
+ *
+ * equation: R² = 1 - SSres/SStot
+ * such that,
+ * SSres = Σ(y - y´)^2
+ * SStot = Σ(y - y_mean)^2
+ *
+ * Note that R² can be negative when the predictions fit the data worse than
+ * simply predicting the mean of the actual values.
+ */
+public class RSquaredMetric implements EvaluationMetric {
+
+    public static final String NAME = "r_squared";
+
+    private static final ObjectParser PARSER =
+        new ObjectParser<>("r_squared", true, RSquaredMetric::new);
+
+    public static RSquaredMetric fromXContent(XContentParser parser) {
+        return PARSER.apply(parser, null);
+    }
+
+    public RSquaredMetric() {
+
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        builder.endObject();
+        return builder;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        // create static hash code from name as there are currently no unique fields per class instance
+        return Objects.hashCode(NAME);
+    }
+
+    @Override
+    public String getName() {
+        return NAME;
+    }
+
+    public static class Result implements EvaluationMetric.Result {
+
+        public static final ParseField VALUE = new ParseField("value");
+        private final double value;
+
+        public static Result fromXContent(XContentParser parser) {
+            return PARSER.apply(parser, null);
+        }
+
+        private static final ConstructingObjectParser PARSER =
+            new ConstructingObjectParser<>("r_squared_result", true, args -> new Result((double) args[0]));
+
+        static {
+            PARSER.declareDouble(constructorArg(), VALUE);
+        }
+
+        public Result(double value) {
+            this.value = value;
+        }
+
+        @Override
+        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+            builder.startObject();
+            builder.field(VALUE.getPreferredName(), value);
+            builder.endObject();
+            return builder;
+        }
+
+        public double getValue() {
+            return value;
+        }
+
+        @Override
+        public String getMetricName() {
+            return NAME;
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) return true;
+            if (o == null || getClass() != o.getClass()) return false;
+            Result that = (Result) o;
+            return Objects.equals(that.value, this.value);
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(value);
+        }
+    }
+}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/evaluation/regression/Regression.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/evaluation/regression/Regression.java
new file mode 100644
index 0000000000000..79b9ab6eb1dd5
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/evaluation/regression/Regression.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements.
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml.dataframe.evaluation.regression; + +import org.elasticsearch.client.ml.dataframe.evaluation.Evaluation; +import org.elasticsearch.client.ml.dataframe.evaluation.EvaluationMetric; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Comparator; +import java.util.List; +import java.util.Objects; + +/** + * Evaluation of regression results. + */ +public class Regression implements Evaluation { + + public static final String NAME = "regression"; + + private static final ParseField ACTUAL_FIELD = new ParseField("actual_field"); + private static final ParseField PREDICTED_FIELD = new ParseField("predicted_field"); + private static final ParseField METRICS = new ParseField("metrics"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + NAME, true, a -> new Regression((String) a[0], (String) a[1], (List) a[2])); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), ACTUAL_FIELD); + PARSER.declareString(ConstructingObjectParser.constructorArg(), PREDICTED_FIELD); + PARSER.declareNamedObjects(ConstructingObjectParser.optionalConstructorArg(), + (p, c, n) -> p.namedObject(EvaluationMetric.class, n, c), METRICS); + } + + public static Regression fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + /** + * The field containing the actual value + * The value of this field is assumed to be numeric + */ + private final String actualField; + + /** + * The field containing the predicted value + * The value of this field is assumed to be numeric + */ + private final String predictedField; + + /** + * The list of metrics to calculate + */ + private final List metrics; + + public Regression(String actualField, String predictedField) { + this(actualField, predictedField, (List)null); + } + + public Regression(String actualField, String predictedField, EvaluationMetric... 
metrics) { + this(actualField, predictedField, Arrays.asList(metrics)); + } + + public Regression(String actualField, String predictedField, @Nullable List metrics) { + this.actualField = Objects.requireNonNull(actualField); + this.predictedField = Objects.requireNonNull(predictedField); + if (metrics != null) { + metrics.sort(Comparator.comparing(EvaluationMetric::getName)); + } + this.metrics = metrics; + } + + @Override + public String getName() { + return NAME; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.field(ACTUAL_FIELD.getPreferredName(), actualField); + builder.field(PREDICTED_FIELD.getPreferredName(), predictedField); + + if (metrics != null) { + builder.startObject(METRICS.getPreferredName()); + for (EvaluationMetric metric : metrics) { + builder.field(metric.getName(), metric); + } + builder.endObject(); + } + + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Regression that = (Regression) o; + return Objects.equals(that.actualField, this.actualField) + && Objects.equals(that.predictedField, this.predictedField) + && Objects.equals(that.metrics, this.metrics); + } + + @Override + public int hashCode() { + return Objects.hash(actualField, predictedField, metrics); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/evaluation/softclassification/BinarySoftClassification.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/evaluation/softclassification/BinarySoftClassification.java index 6d5fa04da38e5..cb531c6ab044a 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/evaluation/softclassification/BinarySoftClassification.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/evaluation/softclassification/BinarySoftClassification.java @@ -29,6 +29,7 @@ import java.io.IOException; import java.util.Arrays; +import java.util.Comparator; import java.util.List; import java.util.Objects; @@ -52,6 +53,7 @@ public class BinarySoftClassification implements Evaluation { public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( NAME, + true, args -> new BinarySoftClassification((String) args[0], (String) args[1], (List) args[2])); static { @@ -80,6 +82,10 @@ public static BinarySoftClassification fromXContent(XContentParser parser) { */ private final List metrics; + public BinarySoftClassification(String actualField, String predictedField) { + this(actualField, predictedField, (List)null); + } + public BinarySoftClassification(String actualField, String predictedProbabilityField, EvaluationMetric... 
metric) { this(actualField, predictedProbabilityField, Arrays.asList(metric)); } @@ -88,7 +94,10 @@ public BinarySoftClassification(String actualField, String predictedProbabilityF @Nullable List metrics) { this.actualField = Objects.requireNonNull(actualField); this.predictedProbabilityField = Objects.requireNonNull(predictedProbabilityField); - this.metrics = Objects.requireNonNull(metrics); + if (metrics != null) { + metrics.sort(Comparator.comparing(EvaluationMetric::getName)); + } + this.metrics = metrics; } @Override @@ -102,11 +111,13 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par builder.field(ACTUAL_FIELD.getPreferredName(), actualField); builder.field(PREDICTED_PROBABILITY_FIELD.getPreferredName(), predictedProbabilityField); - builder.startObject(METRICS.getPreferredName()); - for (EvaluationMetric metric : metrics) { - builder.field(metric.getName(), metric); + if (metrics != null) { + builder.startObject(METRICS.getPreferredName()); + for (EvaluationMetric metric : metrics) { + builder.field(metric.getName(), metric); + } + builder.endObject(); } - builder.endObject(); builder.endObject(); return builder; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/TimingStats.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/TimingStats.java index 73393140f30af..9493270c4b936 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/TimingStats.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/TimingStats.java @@ -39,6 +39,7 @@ public class TimingStats implements ToXContentObject { public static final ParseField BUCKET_COUNT = new ParseField("bucket_count"); + public static final ParseField TOTAL_BUCKET_PROCESSING_TIME_MS = new ParseField("total_bucket_processing_time_ms"); public static final ParseField MIN_BUCKET_PROCESSING_TIME_MS = new ParseField("minimum_bucket_processing_time_ms"); public static final ParseField MAX_BUCKET_PROCESSING_TIME_MS = new ParseField("maximum_bucket_processing_time_ms"); public static final ParseField AVG_BUCKET_PROCESSING_TIME_MS = new ParseField("average_bucket_processing_time_ms"); @@ -49,12 +50,28 @@ public class TimingStats implements ToXContentObject { new ConstructingObjectParser<>( "timing_stats", true, - args -> - new TimingStats((String) args[0], (long) args[1], (Double) args[2], (Double) args[3], (Double) args[4], (Double) args[5])); + args -> { + String jobId = (String) args[0]; + Long bucketCount = (Long) args[1]; + Double totalBucketProcessingTimeMs = (Double) args[2]; + Double minBucketProcessingTimeMs = (Double) args[3]; + Double maxBucketProcessingTimeMs = (Double) args[4]; + Double avgBucketProcessingTimeMs = (Double) args[5]; + Double exponentialAvgBucketProcessingTimeMs = (Double) args[6]; + return new TimingStats( + jobId, + getOrDefault(bucketCount, 0L), + getOrDefault(totalBucketProcessingTimeMs, 0.0), + minBucketProcessingTimeMs, + maxBucketProcessingTimeMs, + avgBucketProcessingTimeMs, + exponentialAvgBucketProcessingTimeMs); + }); static { PARSER.declareString(constructorArg(), Job.ID); - PARSER.declareLong(constructorArg(), BUCKET_COUNT); + PARSER.declareLong(optionalConstructorArg(), BUCKET_COUNT); + PARSER.declareDouble(optionalConstructorArg(), TOTAL_BUCKET_PROCESSING_TIME_MS); PARSER.declareDouble(optionalConstructorArg(), MIN_BUCKET_PROCESSING_TIME_MS); PARSER.declareDouble(optionalConstructorArg(), MAX_BUCKET_PROCESSING_TIME_MS); 
PARSER.declareDouble(optionalConstructorArg(), AVG_BUCKET_PROCESSING_TIME_MS); @@ -63,6 +80,7 @@ public class TimingStats implements ToXContentObject { private final String jobId; private long bucketCount; + private double totalBucketProcessingTimeMs; private Double minBucketProcessingTimeMs; private Double maxBucketProcessingTimeMs; private Double avgBucketProcessingTimeMs; @@ -71,12 +89,14 @@ public class TimingStats implements ToXContentObject { public TimingStats( String jobId, long bucketCount, + double totalBucketProcessingTimeMs, @Nullable Double minBucketProcessingTimeMs, @Nullable Double maxBucketProcessingTimeMs, @Nullable Double avgBucketProcessingTimeMs, @Nullable Double exponentialAvgBucketProcessingTimeMs) { this.jobId = jobId; this.bucketCount = bucketCount; + this.totalBucketProcessingTimeMs = totalBucketProcessingTimeMs; this.minBucketProcessingTimeMs = minBucketProcessingTimeMs; this.maxBucketProcessingTimeMs = maxBucketProcessingTimeMs; this.avgBucketProcessingTimeMs = avgBucketProcessingTimeMs; @@ -91,6 +111,10 @@ public long getBucketCount() { return bucketCount; } + public double getTotalBucketProcessingTimeMs() { + return totalBucketProcessingTimeMs; + } + public Double getMinBucketProcessingTimeMs() { return minBucketProcessingTimeMs; } @@ -112,6 +136,7 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par builder.startObject(); builder.field(Job.ID.getPreferredName(), jobId); builder.field(BUCKET_COUNT.getPreferredName(), bucketCount); + builder.field(TOTAL_BUCKET_PROCESSING_TIME_MS.getPreferredName(), totalBucketProcessingTimeMs); if (minBucketProcessingTimeMs != null) { builder.field(MIN_BUCKET_PROCESSING_TIME_MS.getPreferredName(), minBucketProcessingTimeMs); } @@ -135,6 +160,7 @@ public boolean equals(Object o) { TimingStats that = (TimingStats) o; return Objects.equals(this.jobId, that.jobId) && this.bucketCount == that.bucketCount + && this.totalBucketProcessingTimeMs == that.totalBucketProcessingTimeMs && Objects.equals(this.minBucketProcessingTimeMs, that.minBucketProcessingTimeMs) && Objects.equals(this.maxBucketProcessingTimeMs, that.maxBucketProcessingTimeMs) && Objects.equals(this.avgBucketProcessingTimeMs, that.avgBucketProcessingTimeMs) @@ -146,6 +172,7 @@ public int hashCode() { return Objects.hash( jobId, bucketCount, + totalBucketProcessingTimeMs, minBucketProcessingTimeMs, maxBucketProcessingTimeMs, avgBucketProcessingTimeMs, @@ -156,4 +183,8 @@ public int hashCode() { public String toString() { return Strings.toString(this); } + + private static T getOrDefault(@Nullable T value, T defaultValue) { + return value != null ? value : defaultValue; + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/snapshotlifecycle/DeleteSnapshotLifecyclePolicyRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/snapshotlifecycle/DeleteSnapshotLifecyclePolicyRequest.java new file mode 100644 index 0000000000000..712151def4a50 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/snapshotlifecycle/DeleteSnapshotLifecyclePolicyRequest.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.snapshotlifecycle; + +import org.elasticsearch.client.TimedRequest; + +import java.util.Objects; + +public class DeleteSnapshotLifecyclePolicyRequest extends TimedRequest { + private final String policyId; + + public DeleteSnapshotLifecyclePolicyRequest(String policyId) { + this.policyId = policyId; + } + + public String getPolicyId() { + return this.policyId; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + DeleteSnapshotLifecyclePolicyRequest other = (DeleteSnapshotLifecyclePolicyRequest) o; + return this.policyId.equals(other.policyId); + } + + @Override + public int hashCode() { + return Objects.hash(this.policyId); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/snapshotlifecycle/ExecuteSnapshotLifecyclePolicyRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/snapshotlifecycle/ExecuteSnapshotLifecyclePolicyRequest.java new file mode 100644 index 0000000000000..3c32de6837405 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/snapshotlifecycle/ExecuteSnapshotLifecyclePolicyRequest.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.snapshotlifecycle; + +import org.elasticsearch.client.TimedRequest; + +import java.util.Objects; + +public class ExecuteSnapshotLifecyclePolicyRequest extends TimedRequest { + private final String policyId; + + public ExecuteSnapshotLifecyclePolicyRequest(String policyId) { + this.policyId = policyId; + } + + public String getPolicyId() { + return this.policyId; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ExecuteSnapshotLifecyclePolicyRequest other = (ExecuteSnapshotLifecyclePolicyRequest) o; + return this.policyId.equals(other.policyId); + } + + @Override + public int hashCode() { + return Objects.hash(this.policyId); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/snapshotlifecycle/ExecuteSnapshotLifecyclePolicyResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/snapshotlifecycle/ExecuteSnapshotLifecyclePolicyResponse.java new file mode 100644 index 0000000000000..b5698d715625b --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/snapshotlifecycle/ExecuteSnapshotLifecyclePolicyResponse.java @@ -0,0 +1,81 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.client.snapshotlifecycle;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+
+public class ExecuteSnapshotLifecyclePolicyResponse implements ToXContentObject {
+
+    private static final ParseField SNAPSHOT_NAME = new ParseField("snapshot_name");
+    private static final ConstructingObjectParser PARSER =
+        new ConstructingObjectParser<>("execute_snapshot_policy", true,
+            a -> new ExecuteSnapshotLifecyclePolicyResponse((String) a[0]));
+
+    static {
+        PARSER.declareString(ConstructingObjectParser.constructorArg(), SNAPSHOT_NAME);
+    }
+
+    private final String snapshotName;
+
+    public ExecuteSnapshotLifecyclePolicyResponse(String snapshotName) {
+        this.snapshotName = snapshotName;
+    }
+
+    public static ExecuteSnapshotLifecyclePolicyResponse fromXContent(XContentParser parser) {
+        return PARSER.apply(parser, null);
+    }
+
+    public String getSnapshotName() {
+        return this.snapshotName;
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        builder.field(SNAPSHOT_NAME.getPreferredName(), snapshotName);
+        builder.endObject();
+        return builder;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        ExecuteSnapshotLifecyclePolicyResponse other = (ExecuteSnapshotLifecyclePolicyResponse) o;
+        return this.snapshotName.equals(other.snapshotName);
+    }
+
+    @Override
+    public int hashCode() {
+        return this.snapshotName.hashCode();
+    }
+}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/snapshotlifecycle/GetSnapshotLifecyclePolicyRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/snapshotlifecycle/GetSnapshotLifecyclePolicyRequest.java
new file mode 100644
index 0000000000000..c754cc8878d29
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/snapshotlifecycle/GetSnapshotLifecyclePolicyRequest.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.snapshotlifecycle;
+
+import org.elasticsearch.client.TimedRequest;
+
+import java.util.Arrays;
+
+public class GetSnapshotLifecyclePolicyRequest extends TimedRequest {
+    private final String[] policyIds;
+
+    public GetSnapshotLifecyclePolicyRequest(String...
ids) { + this.policyIds = ids; + } + + public String[] getPolicyIds() { + return this.policyIds; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + GetSnapshotLifecyclePolicyRequest other = (GetSnapshotLifecyclePolicyRequest) o; + return Arrays.equals(this.policyIds, other.policyIds); + } + + @Override + public int hashCode() { + return Arrays.hashCode(this.policyIds); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/snapshotlifecycle/GetSnapshotLifecyclePolicyResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/snapshotlifecycle/GetSnapshotLifecyclePolicyResponse.java new file mode 100644 index 0000000000000..68700bbb34bc4 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/snapshotlifecycle/GetSnapshotLifecyclePolicyResponse.java @@ -0,0 +1,88 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.snapshotlifecycle; + +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; + +public class GetSnapshotLifecyclePolicyResponse implements ToXContentObject { + + private final Map policies; + + public GetSnapshotLifecyclePolicyResponse(Map policies) { + this.policies = policies; + } + + public Map getPolicies() { + return this.policies; + } + + public static GetSnapshotLifecyclePolicyResponse fromXContent(XContentParser parser) throws IOException { + if (parser.currentToken() == null) { + parser.nextToken(); + } + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser::getTokenLocation); + parser.nextToken(); + + Map policies = new HashMap<>(); + while (parser.isClosed() == false) { + if (parser.currentToken() == XContentParser.Token.START_OBJECT) { + final String policyId = parser.currentName(); + SnapshotLifecyclePolicyMetadata policyDefinition = SnapshotLifecyclePolicyMetadata.parse(parser, policyId); + policies.put(policyId, policyDefinition); + } else { + parser.nextToken(); + } + } + return new GetSnapshotLifecyclePolicyResponse(policies); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + + if (o == null || getClass() != o.getClass()) { + return false; + } + + GetSnapshotLifecyclePolicyResponse other = 
(GetSnapshotLifecyclePolicyResponse) o; + return Objects.equals(this.policies, other.policies); + } + + @Override + public int hashCode() { + return Objects.hash(this.policies); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/snapshotlifecycle/PutSnapshotLifecyclePolicyRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/snapshotlifecycle/PutSnapshotLifecyclePolicyRequest.java new file mode 100644 index 0000000000000..7fb5794aee869 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/snapshotlifecycle/PutSnapshotLifecyclePolicyRequest.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.snapshotlifecycle; + +import org.elasticsearch.client.TimedRequest; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +public class PutSnapshotLifecyclePolicyRequest extends TimedRequest implements ToXContentObject { + + private final SnapshotLifecyclePolicy policy; + + public PutSnapshotLifecyclePolicyRequest(SnapshotLifecyclePolicy policy) { + this.policy = Objects.requireNonNull(policy, "policy definition cannot be null"); + } + + public SnapshotLifecyclePolicy getPolicy() { + return policy; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + policy.toXContent(builder, params); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + PutSnapshotLifecyclePolicyRequest other = (PutSnapshotLifecyclePolicyRequest) o; + return Objects.equals(this.policy, other.policy); + } + + @Override + public int hashCode() { + return Objects.hash(this.policy); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/snapshotlifecycle/SnapshotInvocationRecord.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/snapshotlifecycle/SnapshotInvocationRecord.java new file mode 100644 index 0000000000000..ce5a7803c14e6 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/snapshotlifecycle/SnapshotInvocationRecord.java @@ -0,0 +1,100 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.snapshotlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +public class SnapshotInvocationRecord implements ToXContentObject { + static final ParseField SNAPSHOT_NAME = new ParseField("snapshot_name"); + static final ParseField TIMESTAMP = new ParseField("time"); + static final ParseField DETAILS = new ParseField("details"); + + private String snapshotName; + private long timestamp; + private String details; + + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("snapshot_policy_invocation_record", true, + a -> new SnapshotInvocationRecord((String) a[0], (long) a[1], (String) a[2])); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), SNAPSHOT_NAME); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), TIMESTAMP); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), DETAILS); + } + + public static SnapshotInvocationRecord parse(XContentParser parser, String name) { + return PARSER.apply(parser, name); + } + + public SnapshotInvocationRecord(String snapshotName, long timestamp, String details) { + this.snapshotName = Objects.requireNonNull(snapshotName, "snapshot name must be provided"); + this.timestamp = timestamp; + this.details = details; + } + + public String getSnapshotName() { + return snapshotName; + } + + public long getTimestamp() { + return timestamp; + } + + public String getDetails() { + return details; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field(SNAPSHOT_NAME.getPreferredName(), snapshotName); + builder.timeField(TIMESTAMP.getPreferredName(), "time_string", timestamp); + if (Objects.nonNull(details)) { + builder.field(DETAILS.getPreferredName(), details); + } + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SnapshotInvocationRecord that = (SnapshotInvocationRecord) o; + return getTimestamp() == that.getTimestamp() && + Objects.equals(getSnapshotName(), that.getSnapshotName()) && + Objects.equals(getDetails(), that.getDetails()); + } + + @Override + public int hashCode() { + return Objects.hash(getSnapshotName(), getTimestamp(), getDetails()); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/snapshotlifecycle/SnapshotLifecyclePolicy.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/snapshotlifecycle/SnapshotLifecyclePolicy.java new file mode 100644 index 0000000000000..3fd357e2090a2 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/snapshotlifecycle/SnapshotLifecyclePolicy.java @@ 
-0,0 +1,138 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.snapshotlifecycle; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +public class SnapshotLifecyclePolicy implements ToXContentObject { + + private final String id; + private final String name; + private final String schedule; + private final String repository; + private final Map configuration; + + private static final ParseField NAME = new ParseField("name"); + private static final ParseField SCHEDULE = new ParseField("schedule"); + private static final ParseField REPOSITORY = new ParseField("repository"); + private static final ParseField CONFIG = new ParseField("config"); + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("snapshot_lifecycle", true, + (a, id) -> { + String name = (String) a[0]; + String schedule = (String) a[1]; + String repo = (String) a[2]; + Map config = (Map) a[3]; + return new SnapshotLifecyclePolicy(id, name, schedule, repo, config); + }); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), NAME); + PARSER.declareString(ConstructingObjectParser.constructorArg(), SCHEDULE); + PARSER.declareString(ConstructingObjectParser.constructorArg(), REPOSITORY); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> p.map(), CONFIG); + } + + public SnapshotLifecyclePolicy(final String id, final String name, final String schedule, + final String repository, @Nullable Map configuration) { + this.id = Objects.requireNonNull(id); + this.name = name; + this.schedule = schedule; + this.repository = repository; + this.configuration = configuration; + } + + public String getId() { + return this.id; + } + + public String getName() { + return this.name; + } + + public String getSchedule() { + return this.schedule; + } + + public String getRepository() { + return this.repository; + } + + @Nullable + public Map getConfig() { + return this.configuration; + } + + public static SnapshotLifecyclePolicy parse(XContentParser parser, String id) { + return PARSER.apply(parser, id); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(NAME.getPreferredName(), this.name); + builder.field(SCHEDULE.getPreferredName(), 
this.schedule); + builder.field(REPOSITORY.getPreferredName(), this.repository); + if (this.configuration != null) { + builder.field(CONFIG.getPreferredName(), this.configuration); + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(id, name, schedule, repository, configuration); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + + if (obj.getClass() != getClass()) { + return false; + } + SnapshotLifecyclePolicy other = (SnapshotLifecyclePolicy) obj; + return Objects.equals(id, other.id) && + Objects.equals(name, other.name) && + Objects.equals(schedule, other.schedule) && + Objects.equals(repository, other.repository) && + Objects.equals(configuration, other.configuration); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/snapshotlifecycle/SnapshotLifecyclePolicyMetadata.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/snapshotlifecycle/SnapshotLifecyclePolicyMetadata.java new file mode 100644 index 0000000000000..dc68b2fc6e14e --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/snapshotlifecycle/SnapshotLifecyclePolicyMetadata.java @@ -0,0 +1,157 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.snapshotlifecycle; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +public class SnapshotLifecyclePolicyMetadata implements ToXContentObject { + + static final ParseField POLICY = new ParseField("policy"); + static final ParseField VERSION = new ParseField("version"); + static final ParseField MODIFIED_DATE_MILLIS = new ParseField("modified_date_millis"); + static final ParseField MODIFIED_DATE = new ParseField("modified_date"); + static final ParseField LAST_SUCCESS = new ParseField("last_success"); + static final ParseField LAST_FAILURE = new ParseField("last_failure"); + static final ParseField NEXT_EXECUTION_MILLIS = new ParseField("next_execution_millis"); + static final ParseField NEXT_EXECUTION = new ParseField("next_execution"); + + private final SnapshotLifecyclePolicy policy; + private final long version; + private final long modifiedDate; + private final long nextExecution; + @Nullable + private final SnapshotInvocationRecord lastSuccess; + @Nullable + private final SnapshotInvocationRecord lastFailure; + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("snapshot_policy_metadata", + a -> { + SnapshotLifecyclePolicy policy = (SnapshotLifecyclePolicy) a[0]; + long version = (long) a[1]; + long modifiedDate = (long) a[2]; + SnapshotInvocationRecord lastSuccess = (SnapshotInvocationRecord) a[3]; + SnapshotInvocationRecord lastFailure = (SnapshotInvocationRecord) a[4]; + long nextExecution = (long) a[5]; + + return new SnapshotLifecyclePolicyMetadata(policy, version, modifiedDate, lastSuccess, lastFailure, nextExecution); + }); + + static { + PARSER.declareObject(ConstructingObjectParser.constructorArg(), SnapshotLifecyclePolicy::parse, POLICY); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), VERSION); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), MODIFIED_DATE_MILLIS); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), SnapshotInvocationRecord::parse, LAST_SUCCESS); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), SnapshotInvocationRecord::parse, LAST_FAILURE); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), NEXT_EXECUTION_MILLIS); + } + + public static SnapshotLifecyclePolicyMetadata parse(XContentParser parser, String id) { + return PARSER.apply(parser, id); + } + + public SnapshotLifecyclePolicyMetadata(SnapshotLifecyclePolicy policy, long version, long modifiedDate, + SnapshotInvocationRecord lastSuccess, SnapshotInvocationRecord lastFailure, + long nextExecution) { + this.policy = policy; + this.version = version; + this.modifiedDate = modifiedDate; + this.lastSuccess = lastSuccess; + this.lastFailure = lastFailure; + this.nextExecution = nextExecution; + } + + public SnapshotLifecyclePolicy getPolicy() { + return policy; + } + + public String getName() { + return policy.getName(); + } + + public long getVersion() { + return version; + } + + public long getModifiedDate() { + return modifiedDate; + } + + public SnapshotInvocationRecord getLastSuccess() { + return lastSuccess; + } + + public SnapshotInvocationRecord 
getLastFailure() { + return lastFailure; + } + + public long getNextExecution() { + return this.nextExecution; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(POLICY.getPreferredName(), policy); + builder.field(VERSION.getPreferredName(), version); + builder.timeField(MODIFIED_DATE_MILLIS.getPreferredName(), MODIFIED_DATE.getPreferredName(), modifiedDate); + if (Objects.nonNull(lastSuccess)) { + builder.field(LAST_SUCCESS.getPreferredName(), lastSuccess); + } + if (Objects.nonNull(lastFailure)) { + builder.field(LAST_FAILURE.getPreferredName(), lastFailure); + } + builder.timeField(NEXT_EXECUTION_MILLIS.getPreferredName(), NEXT_EXECUTION.getPreferredName(), nextExecution); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(policy, version, modifiedDate, lastSuccess, lastFailure, nextExecution); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + SnapshotLifecyclePolicyMetadata other = (SnapshotLifecyclePolicyMetadata) obj; + return Objects.equals(policy, other.policy) && + Objects.equals(version, other.version) && + Objects.equals(modifiedDate, other.modifiedDate) && + Objects.equals(lastSuccess, other.lastSuccess) && + Objects.equals(lastFailure, other.lastFailure) && + Objects.equals(nextExecution, other.nextExecution); + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractHlrcStreamableXContentTestCase.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractHlrcStreamableXContentTestCase.java deleted file mode 100644 index 1bf78c26a372e..0000000000000 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractHlrcStreamableXContentTestCase.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.client; - -import org.elasticsearch.common.io.stream.Streamable; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; - -import java.io.IOException; - -import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; - -/** - * @deprecated Use {@link AbstractResponseTestCase} instead of this class. 
- */ -// TODO: Remove and change subclasses to use AbstractResponseTestCase instead -@Deprecated -public abstract class AbstractHlrcStreamableXContentTestCase - extends AbstractStreamableXContentTestCase { - - /** - * Generic test that creates new instance of HLRC request/response from the test instance and checks - * both for equality and asserts equality on the two queries. - */ - public final void testHlrcFromXContent() throws IOException { - xContentTester(this::createParser, this::createTestInstance, getToXContentParams(), - p -> convertHlrcToInternal(doHlrcParseInstance(p))) - .numberOfTestRuns(NUMBER_OF_TEST_RUNS) - .supportsUnknownFields(supportsUnknownFields()) - .shuffleFieldsExceptions(getShuffleFieldsExceptions()) - .randomFieldsExcludeFilter(getRandomFieldsExcludeFilter()) - .assertEqualsConsumer(this::assertEqualInstances) - .assertToXContentEquivalence(true) - .test(); - } - - /** - * Parses to a new HLRC instance using the provided {@link XContentParser} - */ - public abstract H doHlrcParseInstance(XContentParser parser) throws IOException; - - /** - * Converts a HLRC instance to a XPack instance - */ - public abstract T convertHlrcToInternal(H instance); - - //TODO this would be final ideally: why do both responses need to parse from xcontent, only one (H) should? I think that T#fromXContent - //are only there for testing and could go away? Then the additional testHlrcFromXContent is also no longer needed. - @Override - protected T doParseInstance(XContentParser parser) throws IOException { - return convertHlrcToInternal(doHlrcParseInstance(parser)); - } -} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkRequestWithGlobalParametersIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkRequestWithGlobalParametersIT.java index dc49e6f88a6e4..4c8e05aa28ea5 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkRequestWithGlobalParametersIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkRequestWithGlobalParametersIT.java @@ -170,7 +170,7 @@ public void testTypeGlobalAndPerRequest() throws IOException { public void testGlobalRouting() throws IOException { createIndexWithMultipleShards("index"); - BulkRequest request = new BulkRequest(null); + BulkRequest request = new BulkRequest((String) null); request.add(new IndexRequest("index").id("1") .source(XContentType.JSON, "field", "bulk1")); request.add(new IndexRequest("index").id("2") @@ -186,7 +186,7 @@ public void testGlobalRouting() throws IOException { } public void testMixLocalAndGlobalRouting() throws IOException { - BulkRequest request = new BulkRequest(null); + BulkRequest request = new BulkRequest((String) null); request.routing("globalRouting"); request.add(new IndexRequest("index").id("1") .source(XContentType.JSON, "field", "bulk1")); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java index 6c161444e2475..c828a73e17d13 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java @@ -1020,6 +1020,24 @@ public void testParamsEncode() throws IOException { } } + public void testGetIdWithPlusSign() throws Exception { + String id = "id+id"; + { + IndexRequest indexRequest = new IndexRequest("index").id(id); + indexRequest.source("field", "value"); + IndexResponse indexResponse = 
highLevelClient().index(indexRequest, RequestOptions.DEFAULT); + assertEquals("index", indexResponse.getIndex()); + assertEquals(id, indexResponse.getId()); + } + { + GetRequest getRequest = new GetRequest("index").id(id); + GetResponse getResponse = highLevelClient().get(getRequest, RequestOptions.DEFAULT); + assertTrue(getResponse.isExists()); + assertEquals("index", getResponse.getIndex()); + assertEquals(id, getResponse.getId()); + } + } + // Not entirely sure if _termvectors belongs to CRUD, and in the absence of a better place, will have it here public void testTermvectors() throws IOException { final String sourceIndex = "index1"; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameRequestConvertersTests.java index db111904f4704..85ff77fd6fb0a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameRequestConvertersTests.java @@ -50,6 +50,8 @@ import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.not; public class DataFrameRequestConvertersTests extends ESTestCase { @@ -66,7 +68,7 @@ public void testPutDataFrameTransform() throws IOException { PutDataFrameTransformRequest putRequest = new PutDataFrameTransformRequest( DataFrameTransformConfigTests.randomDataFrameTransformConfig()); Request request = DataFrameRequestConverters.putDataFrameTransform(putRequest); - + assertThat(request.getParameters(), not(hasKey("defer_validation"))); assertEquals(HttpPut.METHOD_NAME, request.getMethod()); assertThat(request.getEndpoint(), equalTo("/_data_frame/transforms/" + putRequest.getConfig().getId())); @@ -74,6 +76,9 @@ public void testPutDataFrameTransform() throws IOException { DataFrameTransformConfig parsedConfig = DataFrameTransformConfig.PARSER.apply(parser, null); assertThat(parsedConfig, equalTo(putRequest.getConfig())); } + putRequest.setDeferValidation(true); + request = DataFrameRequestConverters.putDataFrameTransform(putRequest); + assertThat(request.getParameters(), hasEntry("defer_validation", Boolean.toString(putRequest.getDeferValidation()))); } public void testDeleteDataFrameTransform() { @@ -82,6 +87,13 @@ public void testDeleteDataFrameTransform() { assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); assertThat(request.getEndpoint(), equalTo("/_data_frame/transforms/foo")); + + assertThat(request.getParameters(), not(hasKey("force"))); + + deleteRequest.setForce(true); + request = DataFrameRequestConverters.deleteDataFrameTransform(deleteRequest); + + assertThat(request.getParameters(), hasEntry("force", "true")); } public void testStartDataFrameTransform() { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java index d7371468b1506..6204f1f7a57dd 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java @@ -180,6 +180,22 @@ public void testCreateDelete() throws IOException { assertThat(deleteError.getMessage(), containsString("Transform with id [test-crud] could not be found")); } 
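+
+    // Setting defer_validation on the PUT request relaxes the creation-time checks
+    // against the source index, so creating a transform whose source index does not
+    // exist yet is expected to be acknowledged rather than rejected (the delete that
+    // follows should then succeed as usual).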
+ public void testCreateDeleteWithDefer() throws IOException { + String sourceIndex = "missing-source-index"; + + String id = "test-with-defer"; + DataFrameTransformConfig transform = validDataFrameTransformConfig(id, sourceIndex, "pivot-dest"); + DataFrameClient client = highLevelClient().dataFrame(); + PutDataFrameTransformRequest request = new PutDataFrameTransformRequest(transform); + request.setDeferValidation(true); + AcknowledgedResponse ack = execute(request, client::putDataFrameTransform, client::putDataFrameTransformAsync); + assertTrue(ack.isAcknowledged()); + + ack = execute(new DeleteDataFrameTransformRequest(transform.getId()), client::deleteDataFrameTransform, + client::deleteDataFrameTransformAsync); + assertTrue(ack.isAcknowledged()); + } + public void testGetTransform() throws IOException { String sourceIndex = "transform-source"; createIndex(sourceIndex); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java index b542db9c4b0bf..5a92602d004f1 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java @@ -123,6 +123,9 @@ import org.elasticsearch.client.ml.dataframe.DataFrameAnalyticsStats; import org.elasticsearch.client.ml.dataframe.OutlierDetection; import org.elasticsearch.client.ml.dataframe.QueryConfig; +import org.elasticsearch.client.ml.dataframe.evaluation.regression.MeanSquaredErrorMetric; +import org.elasticsearch.client.ml.dataframe.evaluation.regression.RSquaredMetric; +import org.elasticsearch.client.ml.dataframe.evaluation.regression.Regression; import org.elasticsearch.client.ml.dataframe.evaluation.softclassification.AucRocMetric; import org.elasticsearch.client.ml.dataframe.evaluation.softclassification.BinarySoftClassification; import org.elasticsearch.client.ml.dataframe.evaluation.softclassification.ConfusionMatrixMetric; @@ -1578,6 +1581,38 @@ public void testEvaluateDataFrame() throws IOException { assertThat(curvePointAtThreshold1.getTruePositiveRate(), equalTo(0.0)); assertThat(curvePointAtThreshold1.getFalsePositiveRate(), equalTo(0.0)); assertThat(curvePointAtThreshold1.getThreshold(), equalTo(1.0)); + + String regressionIndex = "evaluate-regression-test-index"; + createIndex(regressionIndex, mappingForRegression()); + BulkRequest regressionBulk = new BulkRequest() + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .add(docForRegression(regressionIndex, 0.3, 0.1)) // #0 + .add(docForRegression(regressionIndex, 0.3, 0.2)) // #1 + .add(docForRegression(regressionIndex, 0.3, 0.3)) // #2 + .add(docForRegression(regressionIndex, 0.3, 0.4)) // #3 + .add(docForRegression(regressionIndex, 0.3, 0.7)) // #4 + .add(docForRegression(regressionIndex, 0.5, 0.2)) // #5 + .add(docForRegression(regressionIndex, 0.5, 0.3)) // #6 + .add(docForRegression(regressionIndex, 0.5, 0.4)) // #7 + .add(docForRegression(regressionIndex, 0.5, 0.8)) // #8 + .add(docForRegression(regressionIndex, 0.5, 0.9)); // #9 + highLevelClient().bulk(regressionBulk, RequestOptions.DEFAULT); + + evaluateDataFrameRequest = new EvaluateDataFrameRequest(regressionIndex, + new Regression(actualRegression, probabilityRegression, new MeanSquaredErrorMetric(), new RSquaredMetric())); + + evaluateDataFrameResponse = + execute(evaluateDataFrameRequest, machineLearningClient::evaluateDataFrame, 
machineLearningClient::evaluateDataFrameAsync); + assertThat(evaluateDataFrameResponse.getEvaluationName(), equalTo(Regression.NAME)); + assertThat(evaluateDataFrameResponse.getMetrics().size(), equalTo(2)); + + MeanSquaredErrorMetric.Result mseResult = evaluateDataFrameResponse.getMetricByName(MeanSquaredErrorMetric.NAME); + assertThat(mseResult.getMetricName(), equalTo(MeanSquaredErrorMetric.NAME)); + assertThat(mseResult.getError(), closeTo(0.061000000, 1e-9)); + + RSquaredMetric.Result rSquaredResult = evaluateDataFrameResponse.getMetricByName(RSquaredMetric.NAME); + assertThat(rSquaredResult.getMetricName(), equalTo(RSquaredMetric.NAME)); + assertThat(rSquaredResult.getValue(), closeTo(-5.1000000000000005, 1e-9)); } private static XContentBuilder defaultMappingForTest() throws IOException { @@ -1615,6 +1650,28 @@ private static IndexRequest docForClassification(String indexName, boolean isTru .source(XContentType.JSON, actualField, Boolean.toString(isTrue), probabilityField, p); } + private static final String actualRegression = "regression_actual"; + private static final String probabilityRegression = "regression_prob"; + + private static XContentBuilder mappingForRegression() throws IOException { + return XContentFactory.jsonBuilder().startObject() + .startObject("properties") + .startObject(actualRegression) + .field("type", "double") + .endObject() + .startObject(probabilityRegression) + .field("type", "double") + .endObject() + .endObject() + .endObject(); + } + + private static IndexRequest docForRegression(String indexName, double act, double p) { + return new IndexRequest() + .index(indexName) + .source(XContentType.JSON, actualRegression, act, probabilityRegression, p); + } + private void createIndex(String indexName, XContentBuilder mapping) throws IOException { highLevelClient().indices().create(new CreateIndexRequest(indexName).mapping(mapping), RequestOptions.DEFAULT); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java index ae1cd5eb45edf..98c5cf870309c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java @@ -60,6 +60,9 @@ import org.elasticsearch.client.indexlifecycle.UnfollowAction; import org.elasticsearch.client.ml.dataframe.DataFrameAnalysis; import org.elasticsearch.client.ml.dataframe.OutlierDetection; +import org.elasticsearch.client.ml.dataframe.evaluation.regression.MeanSquaredErrorMetric; +import org.elasticsearch.client.ml.dataframe.evaluation.regression.RSquaredMetric; +import org.elasticsearch.client.ml.dataframe.evaluation.regression.Regression; import org.elasticsearch.client.ml.dataframe.evaluation.softclassification.AucRocMetric; import org.elasticsearch.client.ml.dataframe.evaluation.softclassification.BinarySoftClassification; import org.elasticsearch.client.ml.dataframe.evaluation.softclassification.ConfusionMatrixMetric; @@ -674,7 +677,7 @@ public void testDefaultNamedXContents() { public void testProvidedNamedXContents() { List<NamedXContentRegistry.Entry> namedXContents = RestHighLevelClient.getProvidedNamedXContents(); - assertEquals(31, namedXContents.size()); + assertEquals(36, namedXContents.size()); Map<Class<?>, Integer> categories = new HashMap<>(); List<String> names = new ArrayList<>(); for (NamedXContentRegistry.Entry namedXContent : namedXContents) { @@ -712,12 +715,24 @@ public void
testProvidedNamedXContents() { assertTrue(names.contains(OutlierDetection.NAME.getPreferredName())); assertEquals(Integer.valueOf(1), categories.get(SyncConfig.class)); assertTrue(names.contains(TimeSyncConfig.NAME)); - assertEquals(Integer.valueOf(1), categories.get(org.elasticsearch.client.ml.dataframe.evaluation.Evaluation.class)); - assertThat(names, hasItems(BinarySoftClassification.NAME)); - assertEquals(Integer.valueOf(4), categories.get(org.elasticsearch.client.ml.dataframe.evaluation.EvaluationMetric.class)); - assertThat(names, hasItems(AucRocMetric.NAME, PrecisionMetric.NAME, RecallMetric.NAME, ConfusionMatrixMetric.NAME)); - assertEquals(Integer.valueOf(4), categories.get(org.elasticsearch.client.ml.dataframe.evaluation.EvaluationMetric.Result.class)); - assertThat(names, hasItems(AucRocMetric.NAME, PrecisionMetric.NAME, RecallMetric.NAME, ConfusionMatrixMetric.NAME)); + assertEquals(Integer.valueOf(2), categories.get(org.elasticsearch.client.ml.dataframe.evaluation.Evaluation.class)); + assertThat(names, hasItems(BinarySoftClassification.NAME, Regression.NAME)); + assertEquals(Integer.valueOf(6), categories.get(org.elasticsearch.client.ml.dataframe.evaluation.EvaluationMetric.class)); + assertThat(names, + hasItems(AucRocMetric.NAME, + PrecisionMetric.NAME, + RecallMetric.NAME, + ConfusionMatrixMetric.NAME, + MeanSquaredErrorMetric.NAME, + RSquaredMetric.NAME)); + assertEquals(Integer.valueOf(6), categories.get(org.elasticsearch.client.ml.dataframe.evaluation.EvaluationMetric.Result.class)); + assertThat(names, + hasItems(AucRocMetric.NAME, + PrecisionMetric.NAME, + RecallMetric.NAME, + ConfusionMatrixMetric.NAME, + MeanSquaredErrorMetric.NAME, + RSquaredMetric.NAME)); } public void testApiNamingConventions() throws Exception { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SyncedFlushResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SyncedFlushResponseTests.java index 8057a92b3f279..f5cb9cdb0e02b 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SyncedFlushResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SyncedFlushResponseTests.java @@ -191,7 +191,7 @@ TestPlan createTestPlan() throws IOException { ); } else { successful++; - shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse()); + shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse((String) null)); } } shardsResults.add(new ShardsSyncedFlushResult(shardId, "_sync_id_" + shard, replicas + 1, shardResponses)); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/XPackInfoResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/XPackInfoResponseTests.java index 59a7a830a6304..3eaa1e157dc90 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/XPackInfoResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/XPackInfoResponseTests.java @@ -18,42 +18,20 @@ */ package org.elasticsearch.client; -import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.protocol.xpack.XPackInfoResponse; import org.elasticsearch.protocol.xpack.XPackInfoResponse.BuildInfo; -import org.elasticsearch.protocol.xpack.XPackInfoResponse.LicenseInfo; import org.elasticsearch.protocol.xpack.XPackInfoResponse.FeatureSetsInfo; import 
org.elasticsearch.protocol.xpack.XPackInfoResponse.FeatureSetsInfo.FeatureSet; +import org.elasticsearch.protocol.xpack.XPackInfoResponse.LicenseInfo; import org.elasticsearch.protocol.xpack.license.LicenseStatus; -import java.util.HashMap; +import java.io.IOException; import java.util.HashSet; -import java.util.Map; import java.util.Set; -import java.util.function.Function; -import java.util.function.Predicate; -import java.io.IOException; import java.util.stream.Collectors; -public class XPackInfoResponseTests extends - AbstractHlrcStreamableXContentTestCase { - - @Override - protected XPackInfoResponse createBlankInstance() { - return new XPackInfoResponse(); - } - - @Override - public org.elasticsearch.client.xpack.XPackInfoResponse doHlrcParseInstance(XContentParser parser) throws IOException { - return org.elasticsearch.client.xpack.XPackInfoResponse.fromXContent(parser); - } - - @Override - public XPackInfoResponse convertHlrcToInternal(org.elasticsearch.client.xpack.XPackInfoResponse instance) { - return new XPackInfoResponse(convertHlrcToInternal(instance.getBuildInfo()), - convertHlrcToInternal(instance.getLicenseInfo()), convertHlrcToInternal(instance.getFeatureSetsInfo())); - } +public class XPackInfoResponseTests extends AbstractResponseTestCase { private BuildInfo convertHlrcToInternal(org.elasticsearch.client.xpack.XPackInfoResponse.BuildInfo buildInfo) { return buildInfo != null ? new BuildInfo(buildInfo.getHash(), buildInfo.getTimestamp()) : null; @@ -75,64 +53,12 @@ private FeatureSetsInfo convertHlrcToInternal(org.elasticsearch.client.xpack.XPa : null; } - @Override - protected Predicate getRandomFieldsExcludeFilter() { - return path -> path.equals("features") - || (path.startsWith("features") && path.endsWith("native_code_info")); - } - - @Override - protected ToXContent.Params getToXContentParams() { - Map params = new HashMap<>(); - if (randomBoolean()) { - params.put("human", randomBoolean() ? "true" : "false"); - } - if (randomBoolean()) { - params.put("categories", "_none"); - } - return new ToXContent.MapParams(params); - } - - @Override - protected XPackInfoResponse createTestInstance() { - return new XPackInfoResponse( - randomBoolean() ? null : randomBuildInfo(), - randomBoolean() ? null : randomLicenseInfo(), - randomBoolean() ? 
null : randomFeatureSetsInfo()); - } - - @Override - protected XPackInfoResponse mutateInstance(XPackInfoResponse response) { - @SuppressWarnings("unchecked") - Function mutator = randomFrom( - r -> new XPackInfoResponse( - mutateBuildInfo(r.getBuildInfo()), - r.getLicenseInfo(), - r.getFeatureSetsInfo()), - r -> new XPackInfoResponse( - r.getBuildInfo(), - mutateLicenseInfo(r.getLicenseInfo()), - r.getFeatureSetsInfo()), - r -> new XPackInfoResponse( - r.getBuildInfo(), - r.getLicenseInfo(), - mutateFeatureSetsInfo(r.getFeatureSetsInfo()))); - return mutator.apply(response); - } - private BuildInfo randomBuildInfo() { return new BuildInfo( randomAlphaOfLength(10), randomAlphaOfLength(15)); } - private BuildInfo mutateBuildInfo(BuildInfo buildInfo) { - if (buildInfo == null) { - return randomBuildInfo(); - } - return null; - } - private LicenseInfo randomLicenseInfo() { return new LicenseInfo( randomAlphaOfLength(10), @@ -142,13 +68,6 @@ private LicenseInfo randomLicenseInfo() { randomLong()); } - private LicenseInfo mutateLicenseInfo(LicenseInfo licenseInfo) { - if (licenseInfo == null) { - return randomLicenseInfo(); - } - return null; - } - private FeatureSetsInfo randomFeatureSetsInfo() { int size = between(0, 10); Set featureSets = new HashSet<>(size); @@ -158,17 +77,30 @@ private FeatureSetsInfo randomFeatureSetsInfo() { return new FeatureSetsInfo(featureSets); } - private FeatureSetsInfo mutateFeatureSetsInfo(FeatureSetsInfo featureSetsInfo) { - if (featureSetsInfo == null) { - return randomFeatureSetsInfo(); - } - return null; - } - private FeatureSet randomFeatureSet() { return new FeatureSet( randomAlphaOfLength(5), randomBoolean(), randomBoolean()); } + + @Override + protected XPackInfoResponse createServerTestInstance() { + return new XPackInfoResponse( + randomBoolean() ? null : randomBuildInfo(), + randomBoolean() ? null : randomLicenseInfo(), + randomBoolean() ? null : randomFeatureSetsInfo()); + } + + @Override + protected org.elasticsearch.client.xpack.XPackInfoResponse doParseToClientInstance(XContentParser parser) throws IOException { + return org.elasticsearch.client.xpack.XPackInfoResponse.fromXContent(parser); + } + + @Override + protected void assertInstances(XPackInfoResponse serverTestInstance, org.elasticsearch.client.xpack.XPackInfoResponse clientInstance) { + XPackInfoResponse serverInstance = new XPackInfoResponse(convertHlrcToInternal(clientInstance.getBuildInfo()), + convertHlrcToInternal(clientInstance.getLicenseInfo()), convertHlrcToInternal(clientInstance.getFeatureSetsInfo())); + assertEquals(serverTestInstance, serverInstance); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameIndexerPositionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameIndexerPositionTests.java new file mode 100644 index 0000000000000..cd17dd3fe8eb2 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameIndexerPositionTests.java @@ -0,0 +1,76 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.dataframe.transforms; + +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.LinkedHashMap; +import java.util.Map; + +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; + +public class DataFrameIndexerPositionTests extends ESTestCase { + + public void testFromXContent() throws IOException { + xContentTester(this::createParser, + DataFrameIndexerPositionTests::randomDataFrameIndexerPosition, + DataFrameIndexerPositionTests::toXContent, + DataFrameIndexerPosition::fromXContent) + .supportsUnknownFields(true) + .randomFieldsExcludeFilter(field -> field.equals("indexer_position") || + field.equals("bucket_position")) + .test(); + } + + public static DataFrameIndexerPosition randomDataFrameIndexerPosition() { + return new DataFrameIndexerPosition(randomPositionMap(), randomPositionMap()); + } + + public static void toXContent(DataFrameIndexerPosition position, XContentBuilder builder) throws IOException { + builder.startObject(); + if (position.getIndexerPosition() != null) { + builder.field("indexer_position", position.getIndexerPosition()); + } + if (position.getBucketsPosition() != null) { + builder.field("bucket_position", position.getBucketsPosition()); + } + builder.endObject(); + } + + private static Map randomPositionMap() { + if (randomBoolean()) { + return null; + } + int numFields = randomIntBetween(1, 5); + Map position = new LinkedHashMap<>(); + for (int i = 0; i < numFields; i++) { + Object value; + if (randomBoolean()) { + value = randomLong(); + } else { + value = randomAlphaOfLengthBetween(1, 10); + } + position.put(randomAlphaOfLengthBetween(3, 10), value); + } + return position; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfigTests.java index 79b7e85098e04..7a42a6f70d950 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfigTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfigTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.Version; import org.elasticsearch.client.dataframe.transforms.pivot.PivotConfigTests; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.SearchModule; @@ -43,6 +44,7 @@ public static DataFrameTransformConfig randomDataFrameTransformConfig() { return new DataFrameTransformConfig(randomAlphaOfLengthBetween(1, 10), randomSourceConfig(), randomDestConfig(), + randomBoolean() ? null : TimeValue.timeValueMillis(randomIntBetween(1000, 1000000)), randomBoolean() ? null : randomSyncConfig(), PivotConfigTests.randomPivotConfig(), randomBoolean() ? 
null : randomAlphaOfLengthBetween(1, 100), diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStateTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStateTests.java index ebb62890c3cdd..ef1cf3e89b6bf 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStateTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStateTests.java @@ -25,8 +25,6 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; -import java.util.LinkedHashMap; -import java.util.Map; import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; @@ -38,15 +36,16 @@ public void testFromXContent() throws IOException { DataFrameTransformStateTests::toXContent, DataFrameTransformState::fromXContent) .supportsUnknownFields(true) - .randomFieldsExcludeFilter(field -> field.equals("current_position") || - field.equals("node.attributes")) + .randomFieldsExcludeFilter(field -> field.equals("position.indexer_position") || + field.equals("position.bucket_position") || + field.equals("node.attributes")) .test(); } public static DataFrameTransformState randomDataFrameTransformState() { return new DataFrameTransformState(randomFrom(DataFrameTransformTaskState.values()), randomFrom(IndexerState.values()), - randomPositionMap(), + randomBoolean() ? null : DataFrameIndexerPositionTests.randomDataFrameIndexerPosition(), randomLongBetween(0,10), randomBoolean() ? null : randomAlphaOfLength(10), randomBoolean() ? null : DataFrameTransformProgressTests.randomInstance(), @@ -58,7 +57,8 @@ public static void toXContent(DataFrameTransformState state, XContentBuilder bui builder.field("task_state", state.getTaskState().value()); builder.field("indexer_state", state.getIndexerState().value()); if (state.getPosition() != null) { - builder.field("current_position", state.getPosition()); + builder.field("position"); + DataFrameIndexerPositionTests.toXContent(state.getPosition(), builder); } builder.field("checkpoint", state.getCheckpoint()); if (state.getReason() != null) { @@ -75,21 +75,4 @@ public static void toXContent(DataFrameTransformState state, XContentBuilder bui builder.endObject(); } - private static Map randomPositionMap() { - if (randomBoolean()) { - return null; - } - int numFields = randomIntBetween(1, 5); - Map position = new LinkedHashMap<>(); - for (int i = 0; i < numFields; i++) { - Object value; - if (randomBoolean()) { - value = randomLong(); - } else { - value = randomAlphaOfLengthBetween(1, 10); - } - position.put(randomAlphaOfLengthBetween(3, 10), value); - } - return position; - } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameIndexerPositionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameIndexerPositionTests.java new file mode 100644 index 0000000000000..9cf33e6500c72 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameIndexerPositionTests.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.dataframe.transforms.hlrc; + +import org.elasticsearch.client.AbstractResponseTestCase; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerPosition; + +import java.util.LinkedHashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +public class DataFrameIndexerPositionTests extends AbstractResponseTestCase< + DataFrameIndexerPosition, + org.elasticsearch.client.dataframe.transforms.DataFrameIndexerPosition> { + + public static DataFrameIndexerPosition fromHlrc( + org.elasticsearch.client.dataframe.transforms.DataFrameIndexerPosition instance) { + if (instance == null) { + return null; + } + return new DataFrameIndexerPosition(instance.getIndexerPosition(), instance.getBucketsPosition()); + } + + @Override + protected DataFrameIndexerPosition createServerTestInstance() { + return new DataFrameIndexerPosition(randomPositionMap(), randomPositionMap()); + } + + @Override + protected org.elasticsearch.client.dataframe.transforms.DataFrameIndexerPosition doParseToClientInstance(XContentParser parser) { + return org.elasticsearch.client.dataframe.transforms.DataFrameIndexerPosition.fromXContent(parser); + } + + @Override + protected void assertInstances(DataFrameIndexerPosition serverTestInstance, + org.elasticsearch.client.dataframe.transforms.DataFrameIndexerPosition clientInstance) { + assertThat(serverTestInstance.getIndexerPosition(), equalTo(clientInstance.getIndexerPosition())); + assertThat(serverTestInstance.getBucketsPosition(), equalTo(clientInstance.getBucketsPosition())); + } + + private static Map randomPositionMap() { + if (randomBoolean()) { + return null; + } + int numFields = randomIntBetween(1, 5); + Map position = new LinkedHashMap<>(); + for (int i = 0; i < numFields; i++) { + Object value; + if (randomBoolean()) { + value = randomLong(); + } else { + value = randomAlphaOfLengthBetween(1, 10); + } + position.put(randomAlphaOfLengthBetween(3, 10), value); + } + return position; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStateAndStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStateAndStatsTests.java index dde44898bf90b..7b54ab538c30d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStateAndStatsTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStateAndStatsTests.java @@ -19,8 +19,8 @@ package org.elasticsearch.client.dataframe.transforms.hlrc; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.client.AbstractHlrcXContentTestCase; +import org.elasticsearch.common.xcontent.XContentParser; import 
org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStats; @@ -64,7 +64,10 @@ protected boolean supportsUnknownFields() { @Override protected Predicate getRandomFieldsExcludeFilter() { - return field -> field.equals("state.current_position") || field.equals("state.node") || field.equals("state.node.attributes"); + return field -> field.equals("state.position.indexer_position") || + field.equals("state.position.bucket_position") || + field.equals("state.node") || + field.equals("state.node.attributes"); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStateTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStateTests.java index b97e0a72c1fa2..6d378bca5f818 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStateTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStateTests.java @@ -19,8 +19,9 @@ package org.elasticsearch.client.dataframe.transforms.hlrc; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.client.AbstractHlrcXContentTestCase; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerPosition; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointingInfo; @@ -42,7 +43,7 @@ public class DataFrameTransformStateTests extends AbstractHlrcXContentTestCase getRandomFieldsExcludeFilter() { - return field -> field.equals("current_position") || field.equals("node.attributes"); + return field -> field.equals("position.indexer_position") || + field.equals("position.bucket_position") || + field.equals("node.attributes"); } public static DataFrameTransformStateAndStats randomDataFrameTransformStateAndStats(String id) { @@ -95,6 +98,10 @@ public static DataFrameTransformStateAndStats randomDataFrameTransformStateAndSt randomDataFrameTransformCheckpointingInfo()); } + public static DataFrameIndexerPosition randomDataFrameIndexerPosition() { + return new DataFrameIndexerPosition(randomPosition(), randomPosition()); + } + public static DataFrameTransformCheckpointingInfo randomDataFrameTransformCheckpointingInfo() { return new DataFrameTransformCheckpointingInfo(randomDataFrameTransformCheckpointStats(), randomDataFrameTransformCheckpointStats(), randomNonNegativeLong()); @@ -134,7 +141,7 @@ public static DataFrameIndexerTransformStats randomStats(String transformId) { public static DataFrameTransformState randomDataFrameTransformState() { return new DataFrameTransformState(randomFrom(DataFrameTransformTaskState.values()), randomFrom(IndexerState.values()), - randomPosition(), + randomDataFrameIndexerPosition(), randomLongBetween(0,10), randomBoolean() ? null : randomAlphaOfLength(10), randomBoolean() ? 
null : randomDataFrameTransformProgress(), diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java index 731d42f902c50..88332a0c13c5d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java @@ -156,8 +156,9 @@ public void testPutDataFrameTransform() throws IOException, InterruptedException .setId("reviewer-avg-rating") // <1> .setSource(sourceConfig) // <2> .setDest(destConfig) // <3> - .setPivotConfig(pivotConfig) // <4> - .setDescription("This is my test transform") // <5> + .setFrequency(TimeValue.timeValueSeconds(15)) // <4> + .setPivotConfig(pivotConfig) // <5> + .setDescription("This is my test transform") // <6> .build(); // end::put-data-frame-transform-config @@ -165,6 +166,7 @@ public void testPutDataFrameTransform() throws IOException, InterruptedException // tag::put-data-frame-transform-request PutDataFrameTransformRequest request = new PutDataFrameTransformRequest(transformConfig); // <1> + request.setDeferValidation(false); // <2> // end::put-data-frame-transform-request // tag::put-data-frame-transform-execute @@ -373,6 +375,7 @@ public void testDeleteDataFrameTransform() throws IOException, InterruptedExcept // tag::delete-data-frame-transform-request DeleteDataFrameTransformRequest request = new DeleteDataFrameTransformRequest("mega-transform"); // <1> + request.setForce(false); // <2> // end::delete-data-frame-transform-request // tag::delete-data-frame-transform-execute diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ILMDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ILMDocumentationIT.java index db9df0ac24c78..c7ac357a31ef0 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ILMDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ILMDocumentationIT.java @@ -22,6 +22,9 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; +import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; +import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.RequestOptions; @@ -51,6 +54,15 @@ import org.elasticsearch.client.indexlifecycle.StartILMRequest; import org.elasticsearch.client.indexlifecycle.StopILMRequest; import org.elasticsearch.client.indices.CreateIndexRequest; +import org.elasticsearch.client.snapshotlifecycle.DeleteSnapshotLifecyclePolicyRequest; +import org.elasticsearch.client.snapshotlifecycle.ExecuteSnapshotLifecyclePolicyRequest; +import org.elasticsearch.client.snapshotlifecycle.ExecuteSnapshotLifecyclePolicyResponse; +import org.elasticsearch.client.snapshotlifecycle.GetSnapshotLifecyclePolicyRequest; +import org.elasticsearch.client.snapshotlifecycle.GetSnapshotLifecyclePolicyResponse; +import 
org.elasticsearch.client.snapshotlifecycle.PutSnapshotLifecyclePolicyRequest; +import org.elasticsearch.client.snapshotlifecycle.SnapshotInvocationRecord; +import org.elasticsearch.client.snapshotlifecycle.SnapshotLifecyclePolicy; +import org.elasticsearch.client.snapshotlifecycle.SnapshotLifecyclePolicyMetadata; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -60,6 +72,9 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.snapshots.SnapshotInfo; +import org.elasticsearch.snapshots.SnapshotState; import org.hamcrest.Matchers; import java.io.IOException; @@ -68,6 +83,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -740,6 +756,237 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } + public void testAddSnapshotLifecyclePolicy() throws Exception { + RestHighLevelClient client = highLevelClient(); + + PutRepositoryRequest repoRequest = new PutRepositoryRequest(); + + Settings.Builder settingsBuilder = Settings.builder().put("location", "."); + repoRequest.settings(settingsBuilder); + repoRequest.name("my_repository"); + repoRequest.type(FsRepository.TYPE); + org.elasticsearch.action.support.master.AcknowledgedResponse response = + client.snapshot().createRepository(repoRequest, RequestOptions.DEFAULT); + assertTrue(response.isAcknowledged()); + + //////// PUT + // tag::slm-put-snapshot-lifecycle-policy + Map<String, Object> config = new HashMap<>(); + config.put("indices", Collections.singletonList("idx")); + SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy( + "policy_id", "name", "1 2 3 * * ?", "my_repository", config); + PutSnapshotLifecyclePolicyRequest request = + new PutSnapshotLifecyclePolicyRequest(policy); + // end::slm-put-snapshot-lifecycle-policy + + // tag::slm-put-snapshot-lifecycle-policy-execute + AcknowledgedResponse resp = client.indexLifecycle() + .putSnapshotLifecyclePolicy(request, RequestOptions.DEFAULT); + // end::slm-put-snapshot-lifecycle-policy-execute + + // tag::slm-put-snapshot-lifecycle-policy-response + boolean putAcknowledged = resp.isAcknowledged(); // <1> + // end::slm-put-snapshot-lifecycle-policy-response + assertTrue(putAcknowledged); + + // tag::slm-put-snapshot-lifecycle-policy-execute-listener + ActionListener<AcknowledgedResponse> putListener = + new ActionListener<>() { + @Override + public void onResponse(AcknowledgedResponse resp) { + boolean acknowledged = resp.isAcknowledged(); // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::slm-put-snapshot-lifecycle-policy-execute-listener + + // tag::slm-put-snapshot-lifecycle-policy-execute-async + client.indexLifecycle().putSnapshotLifecyclePolicyAsync(request, + RequestOptions.DEFAULT, putListener); + // end::slm-put-snapshot-lifecycle-policy-execute-async + + //////// GET + // tag::slm-get-snapshot-lifecycle-policy + GetSnapshotLifecyclePolicyRequest getAllRequest = + new GetSnapshotLifecyclePolicyRequest(); // <1> + GetSnapshotLifecyclePolicyRequest getRequest = + new GetSnapshotLifecyclePolicyRequest("policy_id"); // <2> + // end::slm-get-snapshot-lifecycle-policy
+ // tag::slm-get-snapshot-lifecycle-policy-execute + GetSnapshotLifecyclePolicyResponse getResponse = + client.indexLifecycle() + .getSnapshotLifecyclePolicy(getRequest, + RequestOptions.DEFAULT); + // end::slm-get-snapshot-lifecycle-policy-execute + + // tag::slm-get-snapshot-lifecycle-policy-execute-listener + ActionListener<GetSnapshotLifecyclePolicyResponse> getListener = + new ActionListener<>() { + @Override + public void onResponse(GetSnapshotLifecyclePolicyResponse resp) { + Map<String, SnapshotLifecyclePolicyMetadata> policies = + resp.getPolicies(); // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::slm-get-snapshot-lifecycle-policy-execute-listener + + // tag::slm-get-snapshot-lifecycle-policy-execute-async + client.indexLifecycle().getSnapshotLifecyclePolicyAsync(getRequest, + RequestOptions.DEFAULT, getListener); + // end::slm-get-snapshot-lifecycle-policy-execute-async + + assertThat(getResponse.getPolicies().size(), equalTo(1)); + // tag::slm-get-snapshot-lifecycle-policy-response + SnapshotLifecyclePolicyMetadata policyMeta = + getResponse.getPolicies().get("policy_id"); // <1> + long policyVersion = policyMeta.getVersion(); + long policyModificationDate = policyMeta.getModifiedDate(); + long nextPolicyExecutionDate = policyMeta.getNextExecution(); + SnapshotInvocationRecord lastSuccess = policyMeta.getLastSuccess(); + SnapshotInvocationRecord lastFailure = policyMeta.getLastFailure(); + SnapshotLifecyclePolicy retrievedPolicy = policyMeta.getPolicy(); // <2> + String id = retrievedPolicy.getId(); + String snapshotNameFormat = retrievedPolicy.getName(); + String repositoryName = retrievedPolicy.getRepository(); + String schedule = retrievedPolicy.getSchedule(); + Map<String, Object> snapshotConfiguration = retrievedPolicy.getConfig(); + // end::slm-get-snapshot-lifecycle-policy-response + + assertNotNull(policyMeta); + assertThat(retrievedPolicy, equalTo(policy)); + assertThat(policyVersion, equalTo(1L)); + + createIndex("idx", Settings.builder().put("index.number_of_shards", 1).build()); + + //////// EXECUTE + // tag::slm-execute-snapshot-lifecycle-policy + ExecuteSnapshotLifecyclePolicyRequest executeRequest = + new ExecuteSnapshotLifecyclePolicyRequest("policy_id"); // <1> + // end::slm-execute-snapshot-lifecycle-policy + + // tag::slm-execute-snapshot-lifecycle-policy-execute + ExecuteSnapshotLifecyclePolicyResponse executeResponse = + client.indexLifecycle() + .executeSnapshotLifecyclePolicy(executeRequest, + RequestOptions.DEFAULT); + // end::slm-execute-snapshot-lifecycle-policy-execute + + // tag::slm-execute-snapshot-lifecycle-policy-response + final String snapshotName = executeResponse.getSnapshotName(); // <1> + // end::slm-execute-snapshot-lifecycle-policy-response + + assertSnapshotExists(client, "my_repository", snapshotName); + + // tag::slm-execute-snapshot-lifecycle-policy-execute-listener + ActionListener<ExecuteSnapshotLifecyclePolicyResponse> executeListener = + new ActionListener<>() { + @Override + public void onResponse(ExecuteSnapshotLifecyclePolicyResponse r) { + String snapshotName = r.getSnapshotName(); // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::slm-execute-snapshot-lifecycle-policy-execute-listener + + // We need a listener that will actually wait for the snapshot to be created + CountDownLatch latch = new CountDownLatch(1); + executeListener = + new ActionListener<>() { + @Override + public void onResponse(ExecuteSnapshotLifecyclePolicyResponse r) { + try { + assertSnapshotExists(client, "my_repository", r.getSnapshotName()); + } catch (Exception e) { + // Ignore + } finally { 
latch.countDown(); + } + } + + @Override + public void onFailure(Exception e) { + latch.countDown(); + fail("failed to execute SLM policy: " + e); + } + }; + + // tag::slm-execute-snapshot-lifecycle-policy-execute-async + client.indexLifecycle() + .executeSnapshotLifecyclePolicyAsync(executeRequest, + RequestOptions.DEFAULT, executeListener); + // end::slm-execute-snapshot-lifecycle-policy-execute-async + latch.await(5, TimeUnit.SECONDS); + + //////// DELETE + // tag::slm-delete-snapshot-lifecycle-policy + DeleteSnapshotLifecyclePolicyRequest deleteRequest = + new DeleteSnapshotLifecyclePolicyRequest("policy_id"); // <1> + // end::slm-delete-snapshot-lifecycle-policy + + // tag::slm-delete-snapshot-lifecycle-policy-execute + AcknowledgedResponse deleteResp = client.indexLifecycle() + .deleteSnapshotLifecyclePolicy(deleteRequest, RequestOptions.DEFAULT); + // end::slm-delete-snapshot-lifecycle-policy-execute + assertTrue(deleteResp.isAcknowledged()); + + ActionListener<AcknowledgedResponse> deleteListener = new ActionListener<>() { + @Override + public void onResponse(AcknowledgedResponse resp) { + // no-op + } + + @Override + public void onFailure(Exception e) { + // no-op + } + }; + + // tag::slm-delete-snapshot-lifecycle-policy-execute-async + client.indexLifecycle() + .deleteSnapshotLifecyclePolicyAsync(deleteRequest, + RequestOptions.DEFAULT, deleteListener); + // end::slm-delete-snapshot-lifecycle-policy-execute-async + + assertTrue(deleteResp.isAcknowledged()); + } + + private void assertSnapshotExists(final RestHighLevelClient client, final String repo, final String snapshotName) throws Exception { + assertBusy(() -> { + GetSnapshotsRequest getSnapshotsRequest = new GetSnapshotsRequest(new String[]{repo}, new String[]{snapshotName}); + try { + final GetSnapshotsResponse snaps = client.snapshot().get(getSnapshotsRequest, RequestOptions.DEFAULT); + Optional<SnapshotInfo> info = snaps.getSnapshots(repo).stream().findFirst(); + if (info.isPresent()) { + info.ifPresent(si -> { + assertThat(si.snapshotId().getName(), equalTo(snapshotName)); + assertThat(si.state(), equalTo(SnapshotState.SUCCESS)); + }); + } else { + fail("unable to find snapshot: " + snapshotName); + } + } catch (Exception e) { + if (e.getMessage().contains("snapshot_missing_exception")) { + fail("snapshot does not exist: " + snapshotName); + } + throw e; + } + }); + } + static Map<String, Object> toMap(Response response) throws IOException { return XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(response.getEntity()), false); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleResponseTests.java index ab4f548c926e6..6c8161ac02138 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleResponseTests.java @@ -52,6 +52,11 @@ protected boolean supportsUnknownFields() { return false; } + @Override + protected boolean assertToXContentEquivalence() { + return false; + } + @Override protected NamedXContentRegistry xContentRegistry() { List<NamedXContentRegistry.Entry> entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/IndexLifecycleExplainResponseTests.java
b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/IndexLifecycleExplainResponseTests.java index 89e580dfd33dd..1cf9bb523a867 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/IndexLifecycleExplainResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/IndexLifecycleExplainResponseTests.java @@ -103,6 +103,11 @@ protected boolean supportsUnknownFields() { return true; } + @Override + protected boolean assertToXContentEquivalence() { + return false; + } + @Override protected Predicate getRandomFieldsExcludeFilter() { return (field) -> diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/AnalyzeResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/AnalyzeResponseTests.java index e29fa88d7fe3e..c6614c1751e7e 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/AnalyzeResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/AnalyzeResponseTests.java @@ -22,13 +22,10 @@ import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction; import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.RandomObjects; import java.io.IOException; -import java.util.ArrayList; import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; public class AnalyzeResponseTests extends AbstractResponseTestCase { @@ -37,7 +34,7 @@ protected AnalyzeAction.Response createServerTestInstance() { int tokenCount = randomIntBetween(1, 30); AnalyzeAction.AnalyzeToken[] tokens = new AnalyzeAction.AnalyzeToken[tokenCount]; for (int i = 0; i < tokenCount; i++) { - tokens[i] = randomToken(); + tokens[i] = RandomObjects.randomToken(random()); } if (randomBoolean()) { AnalyzeAction.CharFilteredText[] charfilters = null; @@ -62,45 +59,6 @@ protected AnalyzeAction.Response createServerTestInstance() { return new AnalyzeAction.Response(Arrays.asList(tokens), null); } - private AnalyzeAction.AnalyzeToken randomToken() { - String token = randomAlphaOfLengthBetween(1, 20); - int position = randomIntBetween(0, 1000); - int startOffset = randomIntBetween(0, 1000); - int endOffset = randomIntBetween(0, 1000); - int posLength = randomIntBetween(1, 5); - String type = randomAlphaOfLengthBetween(1, 20); - Map extras = new HashMap<>(); - if (randomBoolean()) { - int entryCount = randomInt(6); - for (int i = 0; i < entryCount; i++) { - switch (randomInt(6)) { - case 0: - case 1: - case 2: - case 3: - String key = randomAlphaOfLength(5); - String value = randomAlphaOfLength(10); - extras.put(key, value); - break; - case 4: - String objkey = randomAlphaOfLength(5); - Map obj = new HashMap<>(); - obj.put(randomAlphaOfLength(5), randomAlphaOfLength(10)); - extras.put(objkey, obj); - break; - case 5: - String listkey = randomAlphaOfLength(5); - List list = new ArrayList<>(); - list.add(randomAlphaOfLength(4)); - list.add(randomAlphaOfLength(6)); - extras.put(listkey, list); - break; - } - } - } - return new AnalyzeAction.AnalyzeToken(token, position, startOffset, endOffset, posLength, type, extras); - } - @Override protected AnalyzeResponse doParseToClientInstance(XContentParser parser) throws IOException { return AnalyzeResponse.fromXContent(parser); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/license/GetBasicStatusResponseTests.java 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/license/GetBasicStatusResponseTests.java index fd2b692588815..5629ba88c7517 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/license/GetBasicStatusResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/license/GetBasicStatusResponseTests.java @@ -18,28 +18,29 @@ */ package org.elasticsearch.client.license; +import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.client.AbstractHlrcStreamableXContentTestCase; + +import java.io.IOException; public class GetBasicStatusResponseTests - extends AbstractHlrcStreamableXContentTestCase { - @Override - public GetBasicStatusResponse doHlrcParseInstance(XContentParser parser) { - return GetBasicStatusResponse.fromXContent(parser); - } + extends AbstractResponseTestCase { @Override - public org.elasticsearch.license.GetBasicStatusResponse convertHlrcToInternal(GetBasicStatusResponse instance) { - return new org.elasticsearch.license.GetBasicStatusResponse(instance.isEligibleToStartBasic()); + protected org.elasticsearch.license.GetBasicStatusResponse createServerTestInstance() { + return new org.elasticsearch.license.GetBasicStatusResponse(randomBoolean()); } @Override - protected org.elasticsearch.license.GetBasicStatusResponse createBlankInstance() { - return new org.elasticsearch.license.GetBasicStatusResponse(false); + protected GetBasicStatusResponse doParseToClientInstance(XContentParser parser) throws IOException { + return GetBasicStatusResponse.fromXContent(parser); } @Override - protected org.elasticsearch.license.GetBasicStatusResponse createTestInstance() { - return new org.elasticsearch.license.GetBasicStatusResponse(randomBoolean()); + protected void assertInstances(org.elasticsearch.license.GetBasicStatusResponse serverTestInstance, + GetBasicStatusResponse clientInstance) { + org.elasticsearch.license.GetBasicStatusResponse serverInstance = + new org.elasticsearch.license.GetBasicStatusResponse(clientInstance.isEligibleToStartBasic()); + assertEquals(serverTestInstance, serverInstance); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/license/GetTrialStatusResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/license/GetTrialStatusResponseTests.java index 36a38c40b8bb4..d046c01859824 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/license/GetTrialStatusResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/license/GetTrialStatusResponseTests.java @@ -18,29 +18,29 @@ */ package org.elasticsearch.client.license; +import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.client.AbstractHlrcStreamableXContentTestCase; -public class GetTrialStatusResponseTests extends - AbstractHlrcStreamableXContentTestCase { +import java.io.IOException; - @Override - public GetTrialStatusResponse doHlrcParseInstance(XContentParser parser) { - return GetTrialStatusResponse.fromXContent(parser); - } +public class GetTrialStatusResponseTests extends + AbstractResponseTestCase { @Override - public org.elasticsearch.license.GetTrialStatusResponse convertHlrcToInternal(GetTrialStatusResponse instance) { - return new org.elasticsearch.license.GetTrialStatusResponse(instance.isEligibleToStartTrial()); + protected org.elasticsearch.license.GetTrialStatusResponse 
createServerTestInstance() { + return new org.elasticsearch.license.GetTrialStatusResponse(randomBoolean()); } @Override - protected org.elasticsearch.license.GetTrialStatusResponse createBlankInstance() { - return new org.elasticsearch.license.GetTrialStatusResponse(false); + protected GetTrialStatusResponse doParseToClientInstance(XContentParser parser) throws IOException { + return GetTrialStatusResponse.fromXContent(parser); } @Override - protected org.elasticsearch.license.GetTrialStatusResponse createTestInstance() { - return new org.elasticsearch.license.GetTrialStatusResponse(randomBoolean()); + protected void assertInstances(org.elasticsearch.license.GetTrialStatusResponse serverTestInstance, + GetTrialStatusResponse clientInstance) { + org.elasticsearch.license.GetTrialStatusResponse serverInstance = + new org.elasticsearch.license.GetTrialStatusResponse(clientInstance.isEligibleToStartTrial()); + assertEquals(serverTestInstance, serverInstance); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/ConfusionMatrixMetricConfusionMatrixTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/ConfusionMatrixMetricConfusionMatrixTests.java index 28eb221b318c6..b54bcd53fc4a1 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/ConfusionMatrixMetricConfusionMatrixTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/ConfusionMatrixMetricConfusionMatrixTests.java @@ -26,7 +26,7 @@ public class ConfusionMatrixMetricConfusionMatrixTests extends AbstractXContentTestCase<ConfusionMatrixMetric.ConfusionMatrix> { - static ConfusionMatrixMetric.ConfusionMatrix randomConfusionMatrix() { + public static ConfusionMatrixMetric.ConfusionMatrix randomConfusionMatrix() { return new ConfusionMatrixMetric.ConfusionMatrix(randomInt(), randomInt(), randomInt(), randomInt()); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/EvaluateDataFrameResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/EvaluateDataFrameResponseTests.java index b41d113686ccf..70740a3268f10 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/EvaluateDataFrameResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/EvaluateDataFrameResponseTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.client.ml.dataframe.evaluation.EvaluationMetric; import org.elasticsearch.client.ml.dataframe.evaluation.MlEvaluationNamedXContentProvider; +import org.elasticsearch.client.ml.dataframe.evaluation.regression.MeanSquaredErrorMetricResultTests; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; @@ -45,6 +46,9 @@ public static EvaluateDataFrameResponse randomResponse() { if (randomBoolean()) { metrics.add(ConfusionMatrixMetricResultTests.randomResult()); } + if (randomBoolean()) { + metrics.add(MeanSquaredErrorMetricResultTests.randomResult()); + } return new EvaluateDataFrameResponse(randomAlphaOfLength(5), metrics); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/MlInfoActionResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/MlInfoActionResponseTests.java index 2ffb7744a3bb4..2fa57aa9dd952 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/MlInfoActionResponseTests.java +++ 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/MlInfoActionResponseTests.java @@ -18,35 +18,20 @@ */ package org.elasticsearch.client.ml; +import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.client.AbstractHlrcStreamableXContentTestCase; import org.elasticsearch.xpack.core.ml.action.MlInfoAction.Response; import java.io.IOException; import java.util.HashMap; import java.util.Map; -import java.util.function.Predicate; -public class MlInfoActionResponseTests extends - AbstractHlrcStreamableXContentTestCase { +import static org.hamcrest.Matchers.equalTo; - @Override - public MlInfoResponse doHlrcParseInstance(XContentParser parser) throws IOException { - return MlInfoResponse.fromXContent(parser); - } +public class MlInfoActionResponseTests extends AbstractResponseTestCase { @Override - public Response convertHlrcToInternal(MlInfoResponse instance) { - return new Response(instance.getInfo()); - } - - @Override - protected Predicate getRandomFieldsExcludeFilter() { - return p -> true; - } - - @Override - protected Response createTestInstance() { + protected Response createServerTestInstance() { int size = randomInt(10); Map info = new HashMap<>(); for (int j = 0; j < size; j++) { @@ -56,7 +41,12 @@ protected Response createTestInstance() { } @Override - protected Response createBlankInstance() { - return new Response(); + protected MlInfoResponse doParseToClientInstance(XContentParser parser) throws IOException { + return MlInfoResponse.fromXContent(parser); + } + + @Override + protected void assertInstances(Response serverTestInstance, MlInfoResponse clientInstance) { + assertThat(serverTestInstance.getInfo(), equalTo(clientInstance.getInfo())); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutCalendarActionResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutCalendarActionResponseTests.java index fd2b019c613dd..d5ef3dbcc0b1d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutCalendarActionResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutCalendarActionResponseTests.java @@ -19,8 +19,8 @@ package org.elasticsearch.client.ml; import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator; +import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.client.AbstractHlrcStreamableXContentTestCase; import org.elasticsearch.xpack.core.ml.action.PutCalendarAction; import org.elasticsearch.xpack.core.ml.calendars.Calendar; @@ -28,46 +28,13 @@ import java.util.ArrayList; import java.util.List; -public class PutCalendarActionResponseTests - extends AbstractHlrcStreamableXContentTestCase { +import static org.hamcrest.Matchers.equalTo; - @Override - protected PutCalendarAction.Response createTestInstance() { - return new PutCalendarAction.Response(testInstance()); - } - - @Override - protected PutCalendarAction.Response doParseInstance(XContentParser parser) throws IOException { - return new PutCalendarAction.Response(Calendar.LENIENT_PARSER.parse(parser, null).build()); - } - - @Override - protected boolean supportsUnknownFields() { - return true; - } - - @Override - public PutCalendarResponse doHlrcParseInstance(XContentParser parser) throws IOException { - return PutCalendarResponse.fromXContent(parser); - } - - @Override - public PutCalendarAction.Response 
convertHlrcToInternal(PutCalendarResponse instance) { - org.elasticsearch.client.ml.calendars.Calendar hlrcCalendar = instance.getCalendar(); - Calendar internalCalendar = new Calendar(hlrcCalendar.getId(), hlrcCalendar.getJobIds(), hlrcCalendar.getDescription()); - return new PutCalendarAction.Response(internalCalendar); - } +import static org.hamcrest.Matchers.equalTo; +public class PutCalendarActionResponseTests extends AbstractResponseTestCase<PutCalendarAction.Response, PutCalendarResponse> { @Override - protected PutCalendarAction.Response createBlankInstance() { - return new PutCalendarAction.Response(); - } - - public static Calendar testInstance() { - return testInstance(new CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz".toCharArray()).ofCodePointsLength(random(), 10, 10)); - } - - public static Calendar testInstance(String calendarId) { + protected PutCalendarAction.Response createServerTestInstance() { + String calendarId = new CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz".toCharArray()).ofCodePointsLength(random(), 10, 10); int size = randomInt(10); List<String> items = new ArrayList<>(size); for (int i = 0; i < size; i++) { @@ -77,6 +44,20 @@ public static Calendar testInstance(String calendarId) { if (randomBoolean()) { description = randomAlphaOfLength(20); } - return new Calendar(calendarId, items, description); + Calendar calendar = new Calendar(calendarId, items, description); + return new PutCalendarAction.Response(calendar); + } + + @Override + protected PutCalendarResponse doParseToClientInstance(XContentParser parser) throws IOException { + return PutCalendarResponse.fromXContent(parser); + } + + @Override + protected void assertInstances(PutCalendarAction.Response serverTestInstance, PutCalendarResponse clientInstance) { + org.elasticsearch.client.ml.calendars.Calendar hlrcCalendar = clientInstance.getCalendar(); + Calendar internalCalendar = new Calendar(hlrcCalendar.getId(), hlrcCalendar.getJobIds(), hlrcCalendar.getDescription()); + PutCalendarAction.Response convertedServerTestInstance = new PutCalendarAction.Response(internalCalendar); + assertThat(convertedServerTestInstance, equalTo(serverTestInstance)); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedTimingStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedTimingStatsTests.java index 0a5134606da45..cde92b78f6c16 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedTimingStatsTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedTimingStatsTests.java @@ -27,13 +27,15 @@ import java.io.IOException; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; public class DatafeedTimingStatsTests extends AbstractXContentTestCase<DatafeedTimingStats> { private static final String JOB_ID = "my-job-id"; public static DatafeedTimingStats createRandomInstance() { - return new DatafeedTimingStats(randomAlphaOfLength(10), randomLong(), randomDouble()); + return new DatafeedTimingStats( + randomAlphaOfLength(10), randomLong(), randomLong(), randomDouble(), randomBoolean() ? 
null : randomDouble()); } @Override @@ -59,14 +61,16 @@ public void testParse_OptionalFieldsAbsent() throws IOException { DatafeedTimingStats stats = DatafeedTimingStats.PARSER.apply(parser, null); assertThat(stats.getJobId(), equalTo(JOB_ID)); assertThat(stats.getSearchCount(), equalTo(0L)); + assertThat(stats.getBucketCount(), equalTo(0L)); assertThat(stats.getTotalSearchTimeMs(), equalTo(0.0)); + assertThat(stats.getAvgSearchTimePerBucketMs(), nullValue()); } } public void testEquals() { - DatafeedTimingStats stats1 = new DatafeedTimingStats(JOB_ID, 5, 100.0); - DatafeedTimingStats stats2 = new DatafeedTimingStats(JOB_ID, 5, 100.0); - DatafeedTimingStats stats3 = new DatafeedTimingStats(JOB_ID, 5, 200.0); + DatafeedTimingStats stats1 = new DatafeedTimingStats(JOB_ID, 5, 10, 100.0, 20.0); + DatafeedTimingStats stats2 = new DatafeedTimingStats(JOB_ID, 5, 10, 100.0, 20.0); + DatafeedTimingStats stats3 = new DatafeedTimingStats(JOB_ID, 5, 10, 200.0, 20.0); assertTrue(stats1.equals(stats1)); assertTrue(stats1.equals(stats2)); @@ -74,9 +78,9 @@ public void testEquals() { } public void testHashCode() { - DatafeedTimingStats stats1 = new DatafeedTimingStats(JOB_ID, 5, 100.0); - DatafeedTimingStats stats2 = new DatafeedTimingStats(JOB_ID, 5, 100.0); - DatafeedTimingStats stats3 = new DatafeedTimingStats(JOB_ID, 5, 200.0); + DatafeedTimingStats stats1 = new DatafeedTimingStats(JOB_ID, 5, 10, 100.0, 20.0); + DatafeedTimingStats stats2 = new DatafeedTimingStats(JOB_ID, 5, 10, 100.0, 20.0); + DatafeedTimingStats stats3 = new DatafeedTimingStats(JOB_ID, 5, 10, 200.0, 20.0); assertEquals(stats1.hashCode(), stats1.hashCode()); assertEquals(stats1.hashCode(), stats2.hashCode()); @@ -84,9 +88,11 @@ public void testHashCode() { } public void testConstructorAndGetters() { - DatafeedTimingStats stats = new DatafeedTimingStats(JOB_ID, 5, 123.456); + DatafeedTimingStats stats = new DatafeedTimingStats(JOB_ID, 5, 10, 123.456, 78.9); assertThat(stats.getJobId(), equalTo(JOB_ID)); assertThat(stats.getSearchCount(), equalTo(5L)); + assertThat(stats.getBucketCount(), equalTo(10L)); assertThat(stats.getTotalSearchTimeMs(), equalTo(123.456)); + assertThat(stats.getAvgSearchTimePerBucketMs(), equalTo(78.9)); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/evaluation/regression/MeanSquaredErrorMetricResultTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/evaluation/regression/MeanSquaredErrorMetricResultTests.java new file mode 100644 index 0000000000000..290938ba37048 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/evaluation/regression/MeanSquaredErrorMetricResultTests.java @@ -0,0 +1,53 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml.dataframe.evaluation.regression; + +import org.elasticsearch.client.ml.dataframe.evaluation.MlEvaluationNamedXContentProvider; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +public class MeanSquaredErrorMetricResultTests extends AbstractXContentTestCase { + + public static MeanSquaredErrorMetric.Result randomResult() { + return new MeanSquaredErrorMetric.Result(randomDouble()); + } + + @Override + protected MeanSquaredErrorMetric.Result createTestInstance() { + return randomResult(); + } + + @Override + protected MeanSquaredErrorMetric.Result doParseInstance(XContentParser parser) throws IOException { + return MeanSquaredErrorMetric.Result.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + return new NamedXContentRegistry(new MlEvaluationNamedXContentProvider().getNamedXContentParsers()); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/evaluation/regression/MeanSquaredErrorMetricTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/evaluation/regression/MeanSquaredErrorMetricTests.java new file mode 100644 index 0000000000000..9027462b21e75 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/evaluation/regression/MeanSquaredErrorMetricTests.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml.dataframe.evaluation.regression; + +import org.elasticsearch.client.ml.dataframe.evaluation.MlEvaluationNamedXContentProvider; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +public class MeanSquaredErrorMetricTests extends AbstractXContentTestCase { + + @Override + protected NamedXContentRegistry xContentRegistry() { + return new NamedXContentRegistry(new MlEvaluationNamedXContentProvider().getNamedXContentParsers()); + } + + @Override + protected MeanSquaredErrorMetric createTestInstance() { + return new MeanSquaredErrorMetric(); + } + + @Override + protected MeanSquaredErrorMetric doParseInstance(XContentParser parser) throws IOException { + return MeanSquaredErrorMetric.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/evaluation/regression/RSquaredMetricResultTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/evaluation/regression/RSquaredMetricResultTests.java new file mode 100644 index 0000000000000..3d18418a752e3 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/evaluation/regression/RSquaredMetricResultTests.java @@ -0,0 +1,53 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml.dataframe.evaluation.regression; + +import org.elasticsearch.client.ml.dataframe.evaluation.MlEvaluationNamedXContentProvider; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +public class RSquaredMetricResultTests extends AbstractXContentTestCase { + + public static RSquaredMetric.Result randomResult() { + return new RSquaredMetric.Result(randomDouble()); + } + + @Override + protected RSquaredMetric.Result createTestInstance() { + return randomResult(); + } + + @Override + protected RSquaredMetric.Result doParseInstance(XContentParser parser) throws IOException { + return RSquaredMetric.Result.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + return new NamedXContentRegistry(new MlEvaluationNamedXContentProvider().getNamedXContentParsers()); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/evaluation/regression/RSquaredMetricTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/evaluation/regression/RSquaredMetricTests.java new file mode 100644 index 0000000000000..ab8b9e0f7afbe --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/evaluation/regression/RSquaredMetricTests.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml.dataframe.evaluation.regression; + +import org.elasticsearch.client.ml.dataframe.evaluation.MlEvaluationNamedXContentProvider; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +public class RSquaredMetricTests extends AbstractXContentTestCase { + + @Override + protected NamedXContentRegistry xContentRegistry() { + return new NamedXContentRegistry(new MlEvaluationNamedXContentProvider().getNamedXContentParsers()); + } + + @Override + protected RSquaredMetric createTestInstance() { + return new RSquaredMetric(); + } + + @Override + protected RSquaredMetric doParseInstance(XContentParser parser) throws IOException { + return RSquaredMetric.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/evaluation/regression/RegressionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/evaluation/regression/RegressionTests.java new file mode 100644 index 0000000000000..89e4823b93e75 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/evaluation/regression/RegressionTests.java @@ -0,0 +1,69 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml.dataframe.evaluation.regression; + +import org.elasticsearch.client.ml.dataframe.evaluation.EvaluationMetric; +import org.elasticsearch.client.ml.dataframe.evaluation.MlEvaluationNamedXContentProvider; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.function.Predicate; + +public class RegressionTests extends AbstractXContentTestCase { + + @Override + protected NamedXContentRegistry xContentRegistry() { + return new NamedXContentRegistry(new MlEvaluationNamedXContentProvider().getNamedXContentParsers()); + } + + @Override + protected Regression createTestInstance() { + List metrics = new ArrayList<>(); + if (randomBoolean()) { + metrics.add(new MeanSquaredErrorMetric()); + } + if (randomBoolean()) { + metrics.add(new RSquaredMetric()); + } + return randomBoolean() ? + new Regression(randomAlphaOfLength(10), randomAlphaOfLength(10)) : + new Regression(randomAlphaOfLength(10), randomAlphaOfLength(10), metrics.isEmpty() ? 
null : metrics); + } + + @Override + protected Regression doParseInstance(XContentParser parser) throws IOException { + return Regression.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + // allow unknown fields in the root of the object only + return field -> !field.isEmpty(); + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/evaluation/softclassification/BinarySoftClassificationTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/evaluation/softclassification/BinarySoftClassificationTests.java new file mode 100644 index 0000000000000..2fb8a21e3a1da --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/evaluation/softclassification/BinarySoftClassificationTests.java @@ -0,0 +1,85 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml.dataframe.evaluation.softclassification; + +import org.elasticsearch.client.ml.dataframe.evaluation.EvaluationMetric; +import org.elasticsearch.client.ml.dataframe.evaluation.MlEvaluationNamedXContentProvider; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.function.Predicate; + +public class BinarySoftClassificationTests extends AbstractXContentTestCase { + + @Override + protected NamedXContentRegistry xContentRegistry() { + return new NamedXContentRegistry(new MlEvaluationNamedXContentProvider().getNamedXContentParsers()); + } + + @Override + protected BinarySoftClassification createTestInstance() { + List metrics = new ArrayList<>(); + if (randomBoolean()) { + metrics.add(new AucRocMetric(randomBoolean())); + } + if (randomBoolean()) { + metrics.add(new PrecisionMetric(Arrays.asList(randomArray(1, + 4, + Double[]::new, + BinarySoftClassificationTests::randomDouble)))); + } + if (randomBoolean()) { + metrics.add(new RecallMetric(Arrays.asList(randomArray(1, + 4, + Double[]::new, + BinarySoftClassificationTests::randomDouble)))); + } + if (randomBoolean()) { + metrics.add(new ConfusionMatrixMetric(Arrays.asList(randomArray(1, + 4, + Double[]::new, + BinarySoftClassificationTests::randomDouble)))); + } + return randomBoolean() ? + new BinarySoftClassification(randomAlphaOfLength(10), randomAlphaOfLength(10)) : + new BinarySoftClassification(randomAlphaOfLength(10), randomAlphaOfLength(10), metrics.isEmpty() ? 
null : metrics); + } + + @Override + protected BinarySoftClassification doParseInstance(XContentParser parser) throws IOException { + return BinarySoftClassification.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + // allow unknown fields in the root of the object only + return field -> !field.isEmpty(); + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/TimingStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/TimingStatsTests.java index 386bc6ac0b3dd..144cf9548e6ff 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/TimingStatsTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/TimingStatsTests.java @@ -18,9 +18,14 @@ */ package org.elasticsearch.client.ml.job.process; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.AbstractXContentTestCase; +import java.io.IOException; + import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; @@ -32,6 +37,7 @@ public static TimingStats createTestInstance(String jobId) { return new TimingStats( jobId, randomLong(), + randomDouble(), randomBoolean() ? null : randomDouble(), randomBoolean() ? null : randomDouble(), randomBoolean() ? null : randomDouble(), @@ -54,10 +60,11 @@ protected boolean supportsUnknownFields() { } public void testConstructor() { - TimingStats stats = new TimingStats(JOB_ID, 7, 1.0, 2.0, 1.23, 7.89); + TimingStats stats = new TimingStats(JOB_ID, 7, 8.61, 1.0, 2.0, 1.23, 7.89); assertThat(stats.getJobId(), equalTo(JOB_ID)); assertThat(stats.getBucketCount(), equalTo(7L)); + assertThat(stats.getTotalBucketProcessingTimeMs(), equalTo(8.61)); assertThat(stats.getMinBucketProcessingTimeMs(), equalTo(1.0)); assertThat(stats.getMaxBucketProcessingTimeMs(), equalTo(2.0)); assertThat(stats.getAvgBucketProcessingTimeMs(), equalTo(1.23)); @@ -65,20 +72,37 @@ public void testConstructor() { } public void testConstructor_NullValues() { - TimingStats stats = new TimingStats(JOB_ID, 7, null, null, null, null); + TimingStats stats = new TimingStats(JOB_ID, 7, 8.61, null, null, null, null); assertThat(stats.getJobId(), equalTo(JOB_ID)); assertThat(stats.getBucketCount(), equalTo(7L)); + assertThat(stats.getTotalBucketProcessingTimeMs(), equalTo(8.61)); assertThat(stats.getMinBucketProcessingTimeMs(), nullValue()); assertThat(stats.getMaxBucketProcessingTimeMs(), nullValue()); assertThat(stats.getAvgBucketProcessingTimeMs(), nullValue()); assertThat(stats.getExponentialAvgBucketProcessingTimeMs(), nullValue()); } + public void testParse_OptionalFieldsAbsent() throws IOException { + String json = "{\"job_id\": \"my-job-id\"}"; + try (XContentParser parser = + XContentFactory.xContent(XContentType.JSON).createParser( + xContentRegistry(), DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) { + TimingStats stats = TimingStats.PARSER.apply(parser, null); + assertThat(stats.getJobId(), equalTo(JOB_ID)); + assertThat(stats.getBucketCount(), equalTo(0L)); + assertThat(stats.getTotalBucketProcessingTimeMs(), equalTo(0.0)); + assertThat(stats.getMinBucketProcessingTimeMs(), nullValue()); + 
assertThat(stats.getMaxBucketProcessingTimeMs(), nullValue()); + assertThat(stats.getAvgBucketProcessingTimeMs(), nullValue()); + assertThat(stats.getExponentialAvgBucketProcessingTimeMs(), nullValue()); + } + } + public void testEquals() { - TimingStats stats1 = new TimingStats(JOB_ID, 7, 1.0, 2.0, 1.23, 7.89); - TimingStats stats2 = new TimingStats(JOB_ID, 7, 1.0, 2.0, 1.23, 7.89); - TimingStats stats3 = new TimingStats(JOB_ID, 7, 1.0, 3.0, 1.23, 7.89); + TimingStats stats1 = new TimingStats(JOB_ID, 7, 8.61, 1.0, 2.0, 1.23, 7.89); + TimingStats stats2 = new TimingStats(JOB_ID, 7, 8.61, 1.0, 2.0, 1.23, 7.89); + TimingStats stats3 = new TimingStats(JOB_ID, 7, 8.61, 1.0, 3.0, 1.23, 7.89); assertTrue(stats1.equals(stats1)); assertTrue(stats1.equals(stats2)); @@ -86,9 +110,9 @@ public void testEquals() { } public void testHashCode() { - TimingStats stats1 = new TimingStats(JOB_ID, 7, 1.0, 2.0, 1.23, 7.89); - TimingStats stats2 = new TimingStats(JOB_ID, 7, 1.0, 2.0, 1.23, 7.89); - TimingStats stats3 = new TimingStats(JOB_ID, 7, 1.0, 3.0, 1.23, 7.89); + TimingStats stats1 = new TimingStats(JOB_ID, 7, 8.61, 1.0, 2.0, 1.23, 7.89); + TimingStats stats2 = new TimingStats(JOB_ID, 7, 8.61, 1.0, 2.0, 1.23, 7.89); + TimingStats stats3 = new TimingStats(JOB_ID, 7, 8.61, 1.0, 3.0, 1.23, 7.89); assertEquals(stats1.hashCode(), stats1.hashCode()); assertEquals(stats1.hashCode(), stats2.hashCode()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/hlrc/HasPrivilegesResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/hlrc/HasPrivilegesResponseTests.java index 94e326e10555e..59228989e6949 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/hlrc/HasPrivilegesResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/hlrc/HasPrivilegesResponseTests.java @@ -19,18 +19,15 @@ package org.elasticsearch.client.security.hlrc; -import org.elasticsearch.Version; +import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.client.security.HasPrivilegesResponse; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.MapBuilder; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.client.AbstractHlrcStreamableXContentTestCase; import org.elasticsearch.xpack.core.security.authz.permission.ResourcePrivileges; import org.junit.Assert; @@ -48,7 +45,7 @@ import static org.hamcrest.Matchers.equalTo; -public class HasPrivilegesResponseTests extends AbstractHlrcStreamableXContentTestCase< +public class HasPrivilegesResponseTests extends AbstractResponseTestCase< org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse, HasPrivilegesResponse> { @@ -85,60 +82,21 @@ public void testToXContent() throws Exception { } @Override - protected boolean supportsUnknownFields() { - // Because we have nested objects with { string : boolean }, unknown fields cause parsing problems - return false; - } - - @Override - protected org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse createBlankInstance() { - return new 
org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse(); - } - - @Override - protected org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse createTestInstance() { + protected org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse createServerTestInstance() { return randomResponse(); } @Override - public HasPrivilegesResponse doHlrcParseInstance(XContentParser parser) throws IOException { + protected HasPrivilegesResponse doParseToClientInstance(XContentParser parser) throws IOException { return HasPrivilegesResponse.fromXContent(parser); } - @Override - public org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse convertHlrcToInternal(HasPrivilegesResponse hlrc) { - return new org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse( - hlrc.getUsername(), - hlrc.hasAllRequested(), - hlrc.getClusterPrivileges(), - toResourcePrivileges(hlrc.getIndexPrivileges()), - hlrc.getApplicationPrivileges().entrySet().stream() - .collect(Collectors.toMap(Map.Entry::getKey, e -> toResourcePrivileges(e.getValue()))) - ); - } - private static List toResourcePrivileges(Map> map) { return map.entrySet().stream() .map(e -> ResourcePrivileges.builder(e.getKey()).addPrivileges(e.getValue()).build()) .collect(Collectors.toList()); } - private org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse serializeAndDeserialize( - org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse original, Version version) throws IOException { - logger.info("Test serialize/deserialize with version {}", version); - final BytesStreamOutput out = new BytesStreamOutput(); - out.setVersion(version); - original.writeTo(out); - - final org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse copy = - new org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse(); - final StreamInput in = out.bytes().streamInput(); - in.setVersion(version); - copy.readFrom(in); - Assert.assertThat(in.read(), equalTo(-1)); - return copy; - } - private org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse randomResponse() { final String username = randomAlphaOfLengthBetween(4, 12); final Map cluster = new HashMap<>(); @@ -168,4 +126,19 @@ private Collection randomResourcePrivileges() { } return list; } + + @Override + protected void assertInstances(org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse serverTestInstance, + HasPrivilegesResponse hlrc) { + org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse other = + new org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse( + hlrc.getUsername(), + hlrc.hasAllRequested(), + hlrc.getClusterPrivileges(), + toResourcePrivileges(hlrc.getIndexPrivileges()), + hlrc.getApplicationPrivileges().entrySet().stream() + .collect(Collectors.toMap(Map.Entry::getKey, e -> toResourcePrivileges(e.getValue()))) + ); + assertEquals(serverTestInstance, other); + } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderIntegTests.java index 8bc8d73fd2b26..4aa69d3b3e9c1 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderIntegTests.java @@ -134,7 +134,7 @@ private static SSLContext getSslContext() throws Exception { * 12.0.1 so we pin to TLSv1.2 when running on an earlier JDK */ 
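    /*
     * A minimal sketch of the version probe this method relies on; the helper name is
     * illustrative and not part of this class:
     *
     *   static int majorJavaVersion() {
     *       String version = System.getProperty("java.version"); // e.g. "12.0.1", or "1.8.0_212" on JDK 8
     *       return Integer.valueOf(version.split("\\.")[0]);     // -> 12, or 1 on JDK 8
     *   }
     *
     * TLSv1.3 only exists on JDK 11+, so any leading component that parses below 11
     * falls back to the TLSv1.2 pin described above.
     */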
private static String getProtocol() { - String version = AccessController.doPrivileged((PrivilegedAction) () -> System.getProperty("java.specification.version")); + String version = AccessController.doPrivileged((PrivilegedAction) () -> System.getProperty("java.version")); String[] components = version.split("\\."); if (components.length > 0) { final int major = Integer.valueOf(components[0]); diff --git a/distribution/docker/build.gradle b/distribution/docker/build.gradle index 46f456c8c0493..4efaa8c0a90fd 100644 --- a/distribution/docker/build.gradle +++ b/distribution/docker/build.gradle @@ -4,18 +4,20 @@ import org.elasticsearch.gradle.MavenFilteringHack import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.testfixtures.TestFixturesPlugin -apply plugin: 'base' +apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.test.fixtures' configurations { dockerPlugins dockerSource ossDockerSource + restSpec } dependencies { dockerSource project(path: ":distribution:archives:linux-tar") ossDockerSource project(path: ":distribution:archives:oss-linux-tar") + restSpec project(':rest-api-spec') } ext.expansions = { oss, local -> @@ -77,20 +79,65 @@ void addCopyDockerContextTask(final boolean oss) { } } +def createAndSetWritable (Object... locations) { + locations.each { location -> + File file = file(location) + file.mkdirs() + file.setWritable(true, false) + } +} + +task copyKeystore(type: Sync) { + from project(':x-pack:plugin:core') + .file('src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks') + into "${buildDir}/certs" + doLast { + file("${buildDir}/certs").setReadable(true, false) + file("${buildDir}/certs/testnode.jks").setReadable(true, false) + } +} + preProcessFixture { + if (TestFixturesPlugin.dockerComposeSupported()) { + dependsOn assemble + } + dependsOn copyKeystore + doLast { + // tests expect to have an empty repo + project.delete( + "${buildDir}/repo", + "${buildDir}/oss-repo" + ) + createAndSetWritable( + "${buildDir}/repo", + "${buildDir}/oss-repo", + "${buildDir}/logs/default-1", + "${buildDir}/logs/default-2", + "${buildDir}/logs/oss-1", + "${buildDir}/logs/oss-2" + ) + } +} + +processTestResources { + from ({ zipTree(configurations.restSpec.singleFile) }) { + include 'rest-api-spec/api/**' + } + from project(':x-pack:plugin:core') + .file('src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks') + dependsOn configurations.restSpec // don't add the tasks to build the docker images if we have no way of testing them if (TestFixturesPlugin.dockerComposeSupported()) { dependsOn assemble } } -postProcessFixture.doLast { - println "docker default distro is on port: ${ext."test.fixtures.elasticsearch-default.tcp.9200"}, " + - "oss is on: ${ext."test.fixtures.elasticsearch-oss.tcp.9200"}" +task integTest(type: Test) { + maxParallelForks = '1' + include '**/*IT.class' } -// TODO: Add some actual tests, this will just check that the TPC port in the container is up -check.dependsOn postProcessFixture +check.dependsOn integTest void addBuildDockerImage(final boolean oss) { final Task buildDockerImageTask = task(taskName("build", oss, "DockerImage"), type: LoggedExec) { diff --git a/distribution/docker/docker-build-context/src/docker/config/log4j2.properties b/distribution/docker/docker-build-context/src/docker/config/log4j2.properties index 40be55d2e1c3a..6727192999c52 100644 --- a/distribution/docker/docker-build-context/src/docker/config/log4j2.properties 
+++ b/distribution/docker/docker-build-context/src/docker/config/log4j2.properties @@ -16,6 +16,7 @@ appender.deprecation_rolling.type = Console appender.deprecation_rolling.name = deprecation_rolling appender.deprecation_rolling.layout.type = ESJsonLayout appender.deprecation_rolling.layout.type_name = deprecation +appender.deprecation_rolling.layout.esmessagefields=x-opaque-id logger.deprecation.name = org.elasticsearch.deprecation logger.deprecation.level = warn @@ -26,6 +27,7 @@ appender.index_search_slowlog_rolling.type = Console appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling appender.index_search_slowlog_rolling.layout.type = ESJsonLayout appender.index_search_slowlog_rolling.layout.type_name = index_search_slowlog +appender.index_search_slowlog_rolling.layout.esmessagefields=message,took,took_millis,total_hits,stats,search_type,total_shards,source,id logger.index_search_slowlog_rolling.name = index.search.slowlog logger.index_search_slowlog_rolling.level = trace @@ -36,6 +38,7 @@ appender.index_indexing_slowlog_rolling.type = Console appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling appender.index_indexing_slowlog_rolling.layout.type = ESJsonLayout appender.index_indexing_slowlog_rolling.layout.type_name = index_indexing_slowlog +appender.index_indexing_slowlog_rolling.layout.esmessagefields=message,took,took_millis,doc_type,id,routing,source logger.index_indexing_slowlog.name = index.indexing.slowlog.index logger.index_indexing_slowlog.level = trace diff --git a/distribution/docker/docker-compose.yml b/distribution/docker/docker-compose.yml index 3207afd501aaf..ec6730f6dd558 100644 --- a/distribution/docker/docker-compose.yml +++ b/distribution/docker/docker-compose.yml @@ -1,17 +1,133 @@ # Only used for testing the docker images version: '3' services: - elasticsearch-default: + elasticsearch-default-1: image: elasticsearch:test environment: + - node.name=elasticsearch-default-1 + - cluster.initial_master_nodes=elasticsearch-default-1,elasticsearch-default-2 + - discovery.seed_hosts=elasticsearch-default-2:9300 - cluster.name=elasticsearch-default - - discovery.type=single-node + - bootstrap.memory_lock=true + - "ES_JAVA_OPTS=-Xms512m -Xmx512m" + - path.repo=/tmp/es-repo + - node.attr.testattr=test + - cluster.routing.allocation.disk.watermark.low=1b + - cluster.routing.allocation.disk.watermark.high=1b + - cluster.routing.allocation.disk.watermark.flood_stage=1b + - script.max_compilations_rate=2048/1m + - node.store.allow_mmap=false + - xpack.security.enabled=true + - xpack.security.transport.ssl.enabled=true + - xpack.security.http.ssl.enabled=true + - xpack.security.authc.token.enabled=true + - xpack.security.audit.enabled=true + - xpack.security.authc.realms.file.file1.order=0 + - xpack.security.authc.realms.native.native1.order=1 + - xpack.security.transport.ssl.keystore.path=/usr/share/elasticsearch/config/testnode.jks + - xpack.security.http.ssl.keystore.path=/usr/share/elasticsearch/config/testnode.jks + - xpack.http.ssl.verification_mode=certificate + - xpack.security.transport.ssl.verification_mode=certificate + - xpack.license.self_generated.type=trial + volumes: + - ./build/repo:/tmp/es-repo + - ./build/certs/testnode.jks:/usr/share/elasticsearch/config/testnode.jks + - ./build/logs/default-1:/usr/share/elasticsearch/logs + - ./docker-test-entrypoint.sh:/docker-test-entrypoint.sh ports: - "9200" - elasticsearch-oss: - image: elasticsearch-oss:test + ulimits: + memlock: + soft: -1 + hard: -1 + entrypoint: 
/docker-test-entrypoint.sh + elasticsearch-default-2: + image: elasticsearch:test + environment: + - node.name=elasticsearch-default-2 + - cluster.initial_master_nodes=elasticsearch-default-1,elasticsearch-default-2 + - discovery.seed_hosts=elasticsearch-default-1:9300 + - cluster.name=elasticsearch-default + - bootstrap.memory_lock=true + - "ES_JAVA_OPTS=-Xms512m -Xmx512m" + - path.repo=/tmp/es-repo + - node.attr.testattr=test + - cluster.routing.allocation.disk.watermark.low=1b + - cluster.routing.allocation.disk.watermark.high=1b + - cluster.routing.allocation.disk.watermark.flood_stage=1b + - script.max_compilations_rate=2048/1m + - node.store.allow_mmap=false + - xpack.security.enabled=true + - xpack.security.transport.ssl.enabled=true + - xpack.security.http.ssl.enabled=true + - xpack.security.authc.token.enabled=true + - xpack.security.audit.enabled=true + - xpack.security.authc.realms.file.file1.order=0 + - xpack.security.authc.realms.native.native1.order=1 + - xpack.security.transport.ssl.keystore.path=/usr/share/elasticsearch/config/testnode.jks + - xpack.security.http.ssl.keystore.path=/usr/share/elasticsearch/config/testnode.jks + - xpack.http.ssl.verification_mode=certificate + - xpack.security.transport.ssl.verification_mode=certificate + - xpack.license.self_generated.type=trial + volumes: + - ./build/repo:/tmp/es-repo + - ./build/certs/testnode.jks:/usr/share/elasticsearch/config/testnode.jks + - ./build/logs/default-2:/usr/share/elasticsearch/logs + - ./docker-test-entrypoint.sh:/docker-test-entrypoint.sh + ports: + - "9200" + ulimits: + memlock: + soft: -1 + hard: -1 + entrypoint: /docker-test-entrypoint.sh + elasticsearch-oss-1: + image: elasticsearch:test + environment: + - node.name=elasticsearch-oss-1 + - cluster.initial_master_nodes=elasticsearch-oss-1,elasticsearch-oss-2 + - discovery.seed_hosts=elasticsearch-oss-2:9300 + - cluster.name=elasticsearch-oss + - bootstrap.memory_lock=true + - "ES_JAVA_OPTS=-Xms512m -Xmx512m" + - path.repo=/tmp/es-repo + - node.attr.testattr=test + - cluster.routing.allocation.disk.watermark.low=1b + - cluster.routing.allocation.disk.watermark.high=1b + - cluster.routing.allocation.disk.watermark.flood_stage=1b + - script.max_compilations_rate=2048/1m + - node.store.allow_mmap=false + volumes: + - ./build/oss-repo:/tmp/es-repo + - ./build/logs/oss-1:/usr/share/elasticsearch/logs + ports: + - "9200" + ulimits: + memlock: + soft: -1 + hard: -1 + elasticsearch-oss-2: + image: elasticsearch:test environment: + - node.name=elasticsearch-oss-2 + - cluster.initial_master_nodes=elasticsearch-oss-1,elasticsearch-oss-2 + - discovery.seed_hosts=elasticsearch-oss-1:9300 - cluster.name=elasticsearch-oss - - discovery.type=single-node + - bootstrap.memory_lock=true + - "ES_JAVA_OPTS=-Xms512m -Xmx512m" + - path.repo=/tmp/es-repo + - node.attr.testattr=test + - cluster.routing.allocation.disk.watermark.low=1b + - cluster.routing.allocation.disk.watermark.high=1b + - cluster.routing.allocation.disk.watermark.flood_stage=1b + - script.max_compilations_rate=2048/1m + - node.store.allow_mmap=false + volumes: + - ./build/oss-repo:/tmp/es-repo + - ./build/logs/oss-2:/usr/share/elasticsearch/logs ports: - "9200" + ulimits: + memlock: + soft: -1 + hard: -1 diff --git a/distribution/docker/docker-test-entrypoint.sh b/distribution/docker/docker-test-entrypoint.sh new file mode 100755 index 0000000000000..68160cffd1cee --- /dev/null +++ b/distribution/docker/docker-test-entrypoint.sh @@ -0,0 +1,8 @@ +#!/bin/bash +cd /usr/share/elasticsearch/bin/ 
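+# Seed the superuser that the REST tests authenticate as, then create a keystore and
+# pipe in the password for the bundled testnode.jks before handing off to the stock
+# entrypoint, whose output is also captured to console.log for debugging.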
+./elasticsearch-users useradd x_pack_rest_user -p x-pack-test-password -r superuser || true +./elasticsearch-keystore create +echo "testnode" > /tmp/password +cat /tmp/password | ./elasticsearch-keystore add -x -f -v 'xpack.security.transport.ssl.keystore.secure_password' +cat /tmp/password | ./elasticsearch-keystore add -x -f -v 'xpack.security.http.ssl.keystore.secure_password' +/usr/local/bin/docker-entrypoint.sh | tee > /usr/share/elasticsearch/logs/console.log diff --git a/distribution/docker/src/test/java/org/elasticsearch/docker/test/DockerYmlTestSuiteIT.java b/distribution/docker/src/test/java/org/elasticsearch/docker/test/DockerYmlTestSuiteIT.java new file mode 100644 index 0000000000000..3a88301828096 --- /dev/null +++ b/distribution/docker/src/test/java/org/elasticsearch/docker/test/DockerYmlTestSuiteIT.java @@ -0,0 +1,163 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.docker.test; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.client.Request; +import org.elasticsearch.common.CharArrays; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.nio.CharBuffer; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.Base64; + +public class DockerYmlTestSuiteIT extends ESClientYamlSuiteTestCase { + + private static final String USER = "x_pack_rest_user"; + private static final String PASS = "x-pack-test-password"; + private static final String KEYSTORE_PASS = "testnode"; + + public DockerYmlTestSuiteIT(ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return createParameters(); + } + + @Override + protected String getTestRestCluster() { + String distribution = getDistribution(); + return new StringBuilder() + .append("localhost:") + .append(getProperty("test.fixtures.elasticsearch-" + distribution + "-1.tcp.9200")) + .append(",") + .append("localhost:") + .append(getProperty("test.fixtures.elasticsearch-" + distribution + "-2.tcp.9200")) + .toString(); + } + + @Override + protected boolean randomizeContentType() { + return false; + } + + private String 
getDistribution() { + String distribution = System.getProperty("tests.distribution", "default"); + if (distribution.equals("oss") == false && distribution.equals("default") == false) { + throw new IllegalArgumentException("supported values for tests.distribution are oss or default but it was " + distribution); + } + return distribution; + } + + private boolean isOss() { + return getDistribution().equals("oss"); + } + + private String getProperty(String key) { + String value = System.getProperty(key); + if (value == null) { + throw new IllegalStateException("Could not find system properties from test.fixtures. " + + "This test expects to run with the elasticsearch.test.fixtures Gradle plugin"); + } + return value; + } + + @Before + public void waitForCluster() throws IOException { + super.initClient(); + Request health = new Request("GET", "/_cluster/health"); + health.addParameter("wait_for_nodes", "2"); + health.addParameter("wait_for_status", "yellow"); + client().performRequest(health); + } + + static Path keyStore; + + @BeforeClass + public static void getKeyStore() { + try { + keyStore = PathUtils.get(DockerYmlTestSuiteIT.class.getResource("/testnode.jks").toURI()); + } catch (URISyntaxException e) { + throw new ElasticsearchException("exception while reading the store", e); + } + if (Files.exists(keyStore) == false) { + throw new IllegalStateException("Keystore file [" + keyStore + "] does not exist."); + } + } + + @AfterClass + public static void clearKeyStore() { + keyStore = null; + } + + @Override + protected Settings restClientSettings() { + if (isOss()) { + return super.restClientSettings(); + } + String token = basicAuthHeaderValue(USER, new SecureString(PASS.toCharArray())); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .put(ESRestTestCase.TRUSTSTORE_PATH, keyStore) + .put(ESRestTestCase.TRUSTSTORE_PASSWORD, KEYSTORE_PASS) + .build(); + } + + @Override + protected String getProtocol() { + if (isOss()) { + return "http"; + } + return "https"; + } + + private static String basicAuthHeaderValue(String username, SecureString passwd) { + CharBuffer chars = CharBuffer.allocate(username.length() + passwd.length() + 1); + byte[] charBytes = null; + try { + chars.put(username).put(':').put(passwd.getChars()); + charBytes = CharArrays.toUtf8Bytes(chars.array()); + + //TODO we still have passwords in Strings in headers. Maybe we can look into using a CharSequence? 
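+        // base64-encode "username:password" for the Basic scheme; the finally block
+        // below zeroes both the char and byte copies so the secret does not linger on the heap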
+ String basicToken = Base64.getEncoder().encodeToString(charBytes); + return "Basic " + basicToken; + } finally { + Arrays.fill(chars.array(), (char) 0); + if (charBytes != null) { + Arrays.fill(charBytes, (byte) 0); + } + } + } +} diff --git a/distribution/docker/src/test/resources/rest-api-spec/test/10_info.yml b/distribution/docker/src/test/resources/rest-api-spec/test/10_info.yml new file mode 100644 index 0000000000000..cf746ce5ba0b6 --- /dev/null +++ b/distribution/docker/src/test/resources/rest-api-spec/test/10_info.yml @@ -0,0 +1,11 @@ +--- +"Info": + - do: {info: {}} + - is_true: name + - is_true: cluster_name + - is_true: cluster_uuid + - is_true: tagline + - is_true: version + - is_true: version.number + - match: { version.build_type: "docker" } + diff --git a/distribution/docker/src/test/resources/rest-api-spec/test/11_nodes.yml b/distribution/docker/src/test/resources/rest-api-spec/test/11_nodes.yml new file mode 100644 index 0000000000000..1004e19b6b5ae --- /dev/null +++ b/distribution/docker/src/test/resources/rest-api-spec/test/11_nodes.yml @@ -0,0 +1,108 @@ +--- +"Test cat nodes output": + + - do: + cat.nodes: {} + + - match: + $body: | + / #ip heap.percent ram.percent cpu load_1m load_5m load_15m node.role master name + ^ ((\d{1,3}\.){3}\d{1,3} \s+ \d+ \s+ \d* \s+ (-)?\d* \s+ ((-)?\d*(\.\d+)?)? \s+ ((-)?\d*(\.\d+)?)?\s+ ((-)?\d*(\.\d+)?)? \s+ (-|[dmi]{1,3}) \s+ [-*x] \s+ (\S+\s?)+ \n)+ $/ + + - do: + cat.nodes: + v: true + + - match: + $body: | + /^ ip \s+ heap\.percent \s+ ram\.percent \s+ cpu \s+ load_1m \s+ load_5m \s+ load_15m \s+ node\.role \s+ master \s+ name \n + ((\d{1,3}\.){3}\d{1,3} \s+ \d+ \s+ \d* \s+ (-)?\d* \s+ ((-)?\d*(\.\d+)?)? \s+ ((-)?\d*(\.\d+)?)? \s+ ((-)?\d*(\.\d+)?)? \s+ (-|[dmi]{1,3}) \s+ [-*x] \s+ (\S+\s?)+ \n)+ $/ + + - do: + cat.nodes: + h: heap.current,heap.percent,heap.max + v: true + + - match: + $body: | + /^ heap\.current \s+ heap\.percent \s+ heap\.max \n + (\s+ \d+(\.\d+)?[ptgmk]?b \s+ \d+ \s+ \d+(\.\d+)?[ptgmk]?b \n)+ $/ + + - do: + cat.nodes: + h: heap.* + v: true + + - match: + $body: | + /^ heap\.current \s+ heap\.percent \s+ heap\.max \n + (\s+ \d+(\.\d+)?[ptgmk]?b \s+ \d+ \s+ \d+(\.\d+)?[ptgmk]?b \n)+ $/ + + - do: + cat.nodes: + h: file_desc.current,file_desc.percent,file_desc.max + v: true + + - match: + # Windows reports -1 for the file descriptor counts. 
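+      # so the pattern accepts either -1 or a non-negative integer for the current and
+      # max columns, while the percent column is always a plain integer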
+ $body: | + /^ file_desc\.current \s+ file_desc\.percent \s+ file_desc\.max \n + (\s+ (-1|\d+) \s+ \d+ \s+ (-1|\d+) \n)+ $/ + + - do: + cat.nodes: + h: http + v: true + + - match: + $body: | + /^ http \n ((\d{1,3}\.){3}\d{1,3}:\d{1,5}\n)+ $/ + +--- +"Additional disk information": + - do: + cat.nodes: + h: diskAvail,diskTotal,diskUsed,diskUsedPercent + v: true + + - match: + # leading whitespace on columns and optional whitespace on values is necessary + # because `diskAvail` is right aligned and text representation of disk size might be + # longer so it's padded with leading whitespace + $body: | + /^ \s* diskAvail \s+ diskTotal \s+ diskUsed \s+ diskUsedPercent \n + (\s* \d+(\.\d+)?[ptgmk]?b \s+ \d+(\.\d+)?[ptgmk]?b \s+ \d+(\.\d+)?[ptgmk]?b\s+ (100\.00 | \d{1,2}\.\d{2}) \n)+ $/ + + - do: + cat.nodes: + h: disk,dt,du,dup + v: true + + - match: + # leading whitespace on columns and optional whitespace on values is necessary + # because `disk` is right aligned and text representation of disk size might be + # longer so it's padded with leading whitespace + $body: | + /^ \s* disk \s+ dt \s+ du \s+ dup \n + (\s* \d+(\.\d+)?[ptgmk]?b \s+ \d+(\.\d+)?[ptgmk]?b \s+ \d+(\.\d+)?[ptgmk]?b\s+ (100\.00 | \d{1,2}\.\d{2}) \n)+ $/ + +--- +"Test cat nodes output with full_id set": + + - do: + cat.nodes: + h: id + # check for a 4 char non-whitespace character string + - match: + $body: | + /^(\S{4}\n)+$/ + + - do: + cat.nodes: + h: id + full_id: true + # check for a 5+ char non-whitespace character string + - match: + $body: | + /^(\S{5,}\n)+$/ + diff --git a/distribution/src/bin/elasticsearch-cli b/distribution/src/bin/elasticsearch-cli index ae0c88b2043e0..4af827b67caf9 100644 --- a/distribution/src/bin/elasticsearch-cli +++ b/distribution/src/bin/elasticsearch-cli @@ -16,6 +16,10 @@ do ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/$additional_classpath_directory/*" done +# use a small heap size for the CLI tools, and thus the serial collector to +# avoid stealing many CPU cycles; a user can override by setting ES_JAVA_OPTS +ES_JAVA_OPTS="-Xms4m -Xmx64m -XX:+UseSerialGC ${ES_JAVA_OPTS}" + exec \ "$JAVA" \ $ES_JAVA_OPTS \ diff --git a/distribution/src/bin/elasticsearch-cli.bat b/distribution/src/bin/elasticsearch-cli.bat index 405f97ccc8dbf..80b488c66e98c 100644 --- a/distribution/src/bin/elasticsearch-cli.bat +++ b/distribution/src/bin/elasticsearch-cli.bat @@ -12,6 +12,10 @@ if defined ES_ADDITIONAL_CLASSPATH_DIRECTORIES ( ) ) +rem use a small heap size for the CLI tools, and thus the serial collector to +rem avoid stealing many CPU cycles; a user can override by setting ES_JAVA_OPTS +set ES_JAVA_OPTS=-Xms4m -Xmx64m -XX:+UseSerialGC %ES_JAVA_OPTS% + %JAVA% ^ %ES_JAVA_OPTS% ^ -Des.path.home="%ES_HOME%" ^ diff --git a/distribution/src/bin/elasticsearch-service.bat b/distribution/src/bin/elasticsearch-service.bat index 2f9c280743dfb..fd4d4b666dba8 100644 --- a/distribution/src/bin/elasticsearch-service.bat +++ b/distribution/src/bin/elasticsearch-service.bat @@ -106,6 +106,10 @@ if exist "%JAVA_HOME%\bin\server\jvm.dll" ( ) :foundJVM +if not defined ES_TMPDIR ( + for /f "tokens=* usebackq" %%a in (`CALL %JAVA% -cp "!ES_CLASSPATH!" 
"org.elasticsearch.tools.launchers.TempDirectory"`) do set ES_TMPDIR=%%a +) + set ES_JVM_OPTIONS=%ES_PATH_CONF%\jvm.options if not "%ES_JAVA_OPTS%" == "" set ES_JAVA_OPTS=%ES_JAVA_OPTS: =;% diff --git a/distribution/src/config/log4j2.properties b/distribution/src/config/log4j2.properties index 45bf720902c1c..d3398fa1b5d87 100644 --- a/distribution/src/config/log4j2.properties +++ b/distribution/src/config/log4j2.properties @@ -67,6 +67,7 @@ appender.deprecation_rolling.name = deprecation_rolling appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.json appender.deprecation_rolling.layout.type = ESJsonLayout appender.deprecation_rolling.layout.type_name = deprecation +appender.deprecation_rolling.layout.esmessagefields=x-opaque-id appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.json.gz appender.deprecation_rolling.policies.type = Policies @@ -103,6 +104,7 @@ appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:f .cluster_name}_index_search_slowlog.json appender.index_search_slowlog_rolling.layout.type = ESJsonLayout appender.index_search_slowlog_rolling.layout.type_name = index_search_slowlog +appender.index_search_slowlog_rolling.layout.esmessagefields=message,took,took_millis,total_hits,stats,search_type,total_shards,source,id appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs\ .cluster_name}_index_search_slowlog-%i.json.gz @@ -141,6 +143,7 @@ appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys _index_indexing_slowlog.json appender.index_indexing_slowlog_rolling.layout.type = ESJsonLayout appender.index_indexing_slowlog_rolling.layout.type_name = index_indexing_slowlog +appender.index_indexing_slowlog_rolling.layout.esmessagefields=message,took,took_millis,doc_type,id,routing,source appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ _index_indexing_slowlog-%i.json.gz diff --git a/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/common/settings/ChangeKeyStorePasswordCommand.java b/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/common/settings/ChangeKeyStorePasswordCommand.java index 484ac3a783014..526201ede8f66 100644 --- a/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/common/settings/ChangeKeyStorePasswordCommand.java +++ b/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/common/settings/ChangeKeyStorePasswordCommand.java @@ -36,18 +36,12 @@ class ChangeKeyStorePasswordCommand extends BaseKeyStoreCommand { @Override protected void executeCommand(Terminal terminal, OptionSet options, Environment env) throws Exception { - SecureString newPassword = null; - try { - newPassword = readPassword(terminal, true); + try (SecureString newPassword = readPassword(terminal, true)) { final KeyStoreWrapper keyStore = getKeyStore(); keyStore.save(env.configFile(), newPassword.getChars()); terminal.println("Elasticsearch keystore password changed successfully."); } catch (SecurityException e) { throw new UserException(ExitCodes.DATA_ERROR, e.getMessage()); - } finally { - if (null != newPassword) { - newPassword.close(); - } } } } diff --git a/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/common/settings/CreateKeyStoreCommand.java 
b/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/common/settings/CreateKeyStoreCommand.java index 873e12eb4c918..379af6f3a47f3 100644 --- a/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/common/settings/CreateKeyStoreCommand.java +++ b/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/common/settings/CreateKeyStoreCommand.java @@ -45,8 +45,8 @@ class CreateKeyStoreCommand extends EnvironmentAwareCommand { @Override protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { - SecureString password = null; - try { + try (SecureString password = options.has(passwordOption) ? + BaseKeyStoreCommand.readPassword(terminal, true) : new SecureString(new char[0])) { Path keystoreFile = KeyStoreWrapper.keystorePath(env.configFile()); if (Files.exists(keystoreFile)) { if (terminal.promptYesNo("An elasticsearch keystore already exists. Overwrite?", false) == false) { @@ -55,16 +55,10 @@ protected void execute(Terminal terminal, OptionSet options, Environment env) th } } KeyStoreWrapper keystore = KeyStoreWrapper.create(); - password = options.has(passwordOption) ? - BaseKeyStoreCommand.readPassword(terminal, true) : new SecureString(new char[0]); keystore.save(env.configFile(), password.getChars()); terminal.println("Created elasticsearch keystore in " + KeyStoreWrapper.keystorePath(env.configFile())); } catch (SecurityException e) { throw new UserException(ExitCodes.IO_ERROR, "Error creating the elasticsearch keystore."); - } finally { - if (null != password) { - password.close(); - } } } } diff --git a/distribution/tools/keystore-cli/src/test/java/org/elasticsearch/common/settings/AddStringKeyStoreCommandTests.java b/distribution/tools/keystore-cli/src/test/java/org/elasticsearch/common/settings/AddStringKeyStoreCommandTests.java index 2ebb7cbde689a..6818f3deb9a85 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/elasticsearch/common/settings/AddStringKeyStoreCommandTests.java +++ b/distribution/tools/keystore-cli/src/test/java/org/elasticsearch/common/settings/AddStringKeyStoreCommandTests.java @@ -143,7 +143,7 @@ public void testStdinShort() throws Exception { String password = "keystorepassword"; KeyStoreWrapper.create().save(env.configFile(), password.toCharArray()); terminal.addSecretInput(password); - terminal.addSecretInput("secret value 1"); + setInput("secret value 1"); execute("-x", "foo"); assertSecureString("foo", "secret value 1", password); } @@ -152,7 +152,7 @@ public void testStdinLong() throws Exception { String password = "keystorepassword"; KeyStoreWrapper.create().save(env.configFile(), password.toCharArray()); terminal.addSecretInput(password); - terminal.addSecretInput("secret value 2"); + setInput("secret value 2"); execute("--stdin", "foo"); assertSecureString("foo", "secret value 2", password); } diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index 3db958c6ec41e..9a9436d22fad4 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -24,8 +24,8 @@ archivesBaseName = 'elasticsearch-plugin-cli' dependencies { compileOnly project(":server") compileOnly project(":libs:elasticsearch-cli") - compile "org.bouncycastle:bcpg-jdk15on:${versions.bouncycastle}" - compile "org.bouncycastle:bcprov-jdk15on:${versions.bouncycastle}" + compile "org.bouncycastle:bcpg-fips:1.0.3" + compile "org.bouncycastle:bc-fips:1.0.1" testCompile project(":test:framework") testCompile 
'com.google.jimfs:jimfs:1.1' testCompile 'com.google.guava:guava:18.0' @@ -44,4 +44,29 @@ thirdPartyAudit.onlyIf { // FIPS JVM includes many classes from bouncycastle which count as jar hell for the third party audit, // rather than provide a long list of exclusions, disable the check on FIPS. project.inFipsJvm == false -} \ No newline at end of file +} + +/* + * these classes intentionally use the following JDK internal APIs in order to offer the necessary + * functionality + * + * sun.security.internal.spec.TlsKeyMaterialParameterSpec + * sun.security.internal.spec.TlsKeyMaterialSpec + * sun.security.internal.spec.TlsMasterSecretParameterSpec + * sun.security.internal.spec.TlsPrfParameterSpec + * sun.security.internal.spec.TlsRsaPremasterSecretParameterSpec + * sun.security.provider.SecureRandom + * + */ +thirdPartyAudit.ignoreViolations( + 'org.bouncycastle.jcajce.provider.BouncyCastleFipsProvider$CoreSecureRandom', + 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF', + 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$BaseTLSKeyGeneratorSpi', + 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSKeyMaterialGenerator', + 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSKeyMaterialGenerator$2', + 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSMasterSecretGenerator', + 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSMasterSecretGenerator$2', + 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSPRFKeyGenerator', + 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSRsaPreMasterSecretGenerator', + 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSRsaPreMasterSecretGenerator$2' +) diff --git a/distribution/tools/plugin-cli/licenses/bc-fips-1.0.1.jar.sha1 b/distribution/tools/plugin-cli/licenses/bc-fips-1.0.1.jar.sha1 new file mode 100644 index 0000000000000..2e4bb227b43bc --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/bc-fips-1.0.1.jar.sha1 @@ -0,0 +1 @@ +ed8dd3144761eaa33b9c56f5e2bef85f1b731d6f \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/bcpg-fips-1.0.3.jar.sha1 b/distribution/tools/plugin-cli/licenses/bcpg-fips-1.0.3.jar.sha1 new file mode 100644 index 0000000000000..b0655f0dc7893 --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/bcpg-fips-1.0.3.jar.sha1 @@ -0,0 +1 @@ +e67f464dc25594fa4dca92281e093eab03e170e0 \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/bcpg-jdk15on-1.61.jar.sha1 b/distribution/tools/plugin-cli/licenses/bcpg-jdk15on-1.61.jar.sha1 deleted file mode 100644 index e7c20268ef534..0000000000000 --- a/distribution/tools/plugin-cli/licenses/bcpg-jdk15on-1.61.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -422656435514ab8a28752b117d5d2646660a0ace \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/bcprov-jdk15on-1.61.jar.sha1 b/distribution/tools/plugin-cli/licenses/bcprov-jdk15on-1.61.jar.sha1 deleted file mode 100644 index 0ccfcd61a0e59..0000000000000 --- a/distribution/tools/plugin-cli/licenses/bcprov-jdk15on-1.61.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -00df4b474e71be02c1349c3292d98886f888d1f7 \ No newline at end of file diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java index 06b1aae2bce32..1e670a7f15a98 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java +++ 
b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java @@ -24,7 +24,7 @@ import org.apache.lucene.search.spell.LevenshteinDistance; import org.apache.lucene.util.CollectionUtil; import org.bouncycastle.bcpg.ArmoredInputStream; -import org.bouncycastle.jce.provider.BouncyCastleProvider; +import org.bouncycastle.jcajce.provider.BouncyCastleFipsProvider; import org.bouncycastle.openpgp.PGPException; import org.bouncycastle.openpgp.PGPPublicKey; import org.bouncycastle.openpgp.PGPPublicKeyRingCollection; @@ -500,17 +500,26 @@ private Path downloadAndValidate( } } - try { - final byte[] zipBytes = Files.readAllBytes(zip); - final String actualChecksum = MessageDigests.toHexString(MessageDigest.getInstance(digestAlgo).digest(zipBytes)); - if (expectedChecksum.equals(actualChecksum) == false) { - throw new UserException( + // read the bytes of the plugin zip in chunks to avoid out of memory errors + try (InputStream zis = Files.newInputStream(zip)) { + try { + final MessageDigest digest = MessageDigest.getInstance(digestAlgo); + final byte[] bytes = new byte[8192]; + int read; + while ((read = zis.read(bytes)) != -1) { + assert read > 0 : read; + digest.update(bytes, 0, read); + } + final String actualChecksum = MessageDigests.toHexString(digest.digest()); + if (expectedChecksum.equals(actualChecksum) == false) { + throw new UserException( ExitCodes.IO_ERROR, digestAlgo + " mismatch, expected " + expectedChecksum + " but got " + actualChecksum); + } + } catch (final NoSuchAlgorithmException e) { + // this should never happen as we are using SHA-1 and SHA-512 here + throw new AssertionError(e); } - } catch (final NoSuchAlgorithmException e) { - // this should never happen as we are using SHA-1 and SHA-512 here - throw new AssertionError(e); } if (officialPlugin) { @@ -551,7 +560,7 @@ void verifySignature(final Path zip, final String urlString) throws IOException, // compute the signature of the downloaded plugin zip final PGPPublicKeyRingCollection collection = new PGPPublicKeyRingCollection(ain, new JcaKeyFingerprintCalculator()); final PGPPublicKey key = collection.getPublicKey(signature.getKeyID()); - signature.init(new JcaPGPContentVerifierBuilderProvider().setProvider(new BouncyCastleProvider()), key); + signature.init(new JcaPGPContentVerifierBuilderProvider().setProvider(new BouncyCastleFipsProvider()), key); final byte[] buffer = new byte[1024]; int read; while ((read = fin.read(buffer)) != -1) { diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java index 248eb364ebc64..8a890d8d7ffb8 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java @@ -26,7 +26,7 @@ import org.bouncycastle.bcpg.ArmoredOutputStream; import org.bouncycastle.bcpg.BCPGOutputStream; import org.bouncycastle.bcpg.HashAlgorithmTags; -import org.bouncycastle.jce.provider.BouncyCastleProvider; +import org.bouncycastle.jcajce.provider.BouncyCastleFipsProvider; import org.bouncycastle.openpgp.PGPEncryptedData; import org.bouncycastle.openpgp.PGPException; import org.bouncycastle.openpgp.PGPKeyPair; @@ -36,11 +36,10 @@ import org.bouncycastle.openpgp.PGPSignature; import org.bouncycastle.openpgp.PGPSignatureGenerator; import 
org.bouncycastle.openpgp.operator.PGPDigestCalculator; -import org.bouncycastle.openpgp.operator.bc.BcPBESecretKeyDecryptorBuilder; -import org.bouncycastle.openpgp.operator.bc.BcPGPContentSignerBuilder; import org.bouncycastle.openpgp.operator.jcajce.JcaPGPContentSignerBuilder; import org.bouncycastle.openpgp.operator.jcajce.JcaPGPDigestCalculatorProviderBuilder; import org.bouncycastle.openpgp.operator.jcajce.JcaPGPKeyPair; +import org.bouncycastle.openpgp.operator.jcajce.JcePBESecretKeyDecryptorBuilder; import org.bouncycastle.openpgp.operator.jcajce.JcePBESecretKeyEncryptorBuilder; import org.elasticsearch.Build; import org.elasticsearch.Version; @@ -61,7 +60,6 @@ import org.elasticsearch.test.PosixPermissionsResetter; import org.junit.After; import org.junit.Before; -import org.junit.BeforeClass; import java.io.BufferedReader; import java.io.ByteArrayInputStream; @@ -141,11 +139,6 @@ public InstallPluginCommandTests(FileSystem fs, Function temp) { System.setProperty("java.io.tmpdir", temp.apply("tmpdir").toString()); } - @BeforeClass - public static void testIfFipsMode() { - assumeFalse("Can't run in a FIPS JVM because this depends on BouncyCastle (non-fips)", inFipsJvm()); - } - @Override @Before public void setUp() throws Exception { @@ -1174,9 +1167,9 @@ public PGPSecretKey newSecretKey() throws NoSuchAlgorithmException, NoSuchProvid sha1Calc, null, null, - new JcaPGPContentSignerBuilder(pkp.getPublicKey().getAlgorithm(), HashAlgorithmTags.SHA1), - new JcePBESecretKeyEncryptorBuilder(PGPEncryptedData.CAST5, sha1Calc) - .setProvider(new BouncyCastleProvider()) + new JcaPGPContentSignerBuilder(pkp.getPublicKey().getAlgorithm(), HashAlgorithmTags.SHA256), + new JcePBESecretKeyEncryptorBuilder(PGPEncryptedData.AES_192, sha1Calc) + .setProvider(new BouncyCastleFipsProvider()) .build("passphrase".toCharArray())); } @@ -1197,11 +1190,11 @@ private String signature(final byte[] bytes, final PGPSecretKey secretKey) { try { final PGPPrivateKey privateKey = secretKey.extractPrivateKey( - new BcPBESecretKeyDecryptorBuilder( + new JcePBESecretKeyDecryptorBuilder( new JcaPGPDigestCalculatorProviderBuilder().build()).build("passphrase".toCharArray())); final PGPSignatureGenerator generator = new PGPSignatureGenerator( - new BcPGPContentSignerBuilder(privateKey.getPublicKeyPacket().getAlgorithm(), HashAlgorithmTags.SHA512)); + new JcaPGPContentSignerBuilder(privateKey.getPublicKeyPacket().getAlgorithm(), HashAlgorithmTags.SHA512)); generator.init(PGPSignature.BINARY_DOCUMENT, privateKey); final ByteArrayOutputStream output = new ByteArrayOutputStream(); try (BCPGOutputStream pout = new BCPGOutputStream(new ArmoredOutputStream(output)); diff --git a/docs/README.asciidoc b/docs/README.asciidoc index b7fad903f54b0..3153e3dcc7802 100644 --- a/docs/README.asciidoc +++ b/docs/README.asciidoc @@ -76,7 +76,8 @@ for its modifiers: but rather than the setup defined in `docs/build.gradle` the setup is defined right in the documentation file. In general, we should prefer `// TESTSETUP` over `// TEST[setup:name]` because it makes it more clear what steps have to - be taken before the examples will work. + be taken before the examples will work. Tip: `// TESTSETUP` can only be used + on the first snippet of a document. * `// NOTCONSOLE`: Marks this snippet as neither `// CONSOLE` nor `// TESTRESPONSE`, excluding it from the list of unconverted snippets. 
We should only use this for snippets that *are* JSON but are *not* responses or diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index 224be0a0cee30..8ad24f5489228 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -5,8 +5,8 @@ bare_version never includes -alpha or -beta :bare_version: 8.0.0 :major-version: 8.x :prev-major-version: 7.x -:lucene_version: 8.1.0 -:lucene_version_path: 8_1_0 +:lucene_version: 8.2.0 +:lucene_version_path: 8_2_0 :branch: master :jdk: 11.0.2 :jdk_major: 11 diff --git a/docs/build.gradle b/docs/build.gradle index d13f4ca3b2edb..c80c16952e7cf 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -1,4 +1,4 @@ -import static org.elasticsearch.gradle.Distribution.DEFAULT +import static org.elasticsearch.gradle.testclusters.TestDistribution.DEFAULT /* * Licensed to Elasticsearch under one or more contributor @@ -23,27 +23,28 @@ apply plugin: 'elasticsearch.docs-test' /* List of files that have snippets that will not work until platinum tests can occur ... */ buildRestTests.expectedUnconvertedCandidates = [ - 'reference/ml/transforms.asciidoc', - 'reference/ml/apis/delete-calendar-event.asciidoc', - 'reference/ml/apis/get-bucket.asciidoc', - 'reference/ml/apis/get-category.asciidoc', - 'reference/ml/apis/get-influencer.asciidoc', - 'reference/ml/apis/get-job-stats.asciidoc', - 'reference/ml/apis/get-overall-buckets.asciidoc', - 'reference/ml/apis/get-record.asciidoc', - 'reference/ml/apis/get-snapshot.asciidoc', - 'reference/ml/apis/post-data.asciidoc', - 'reference/ml/apis/revert-snapshot.asciidoc', - 'reference/ml/apis/update-snapshot.asciidoc', + 'reference/ml/anomaly-detection/transforms.asciidoc', + 'reference/ml/anomaly-detection/apis/delete-calendar-event.asciidoc', + 'reference/ml/anomaly-detection/apis/get-bucket.asciidoc', + 'reference/ml/anomaly-detection/apis/get-category.asciidoc', + 'reference/ml/anomaly-detection/apis/get-influencer.asciidoc', + 'reference/ml/anomaly-detection/apis/get-job-stats.asciidoc', + 'reference/ml/anomaly-detection/apis/get-overall-buckets.asciidoc', + 'reference/ml/anomaly-detection/apis/get-record.asciidoc', + 'reference/ml/anomaly-detection/apis/get-snapshot.asciidoc', + 'reference/ml/anomaly-detection/apis/post-data.asciidoc', + 'reference/ml/anomaly-detection/apis/revert-snapshot.asciidoc', + 'reference/ml/anomaly-detection/apis/update-snapshot.asciidoc', ] testClusters.integTest { - if (singleNode().distribution == DEFAULT) { + if (singleNode().testDistribution == DEFAULT) { setting 'xpack.license.self_generated.type', 'trial' } // enable regexes in painless so our tests don't complain about example snippets that use them setting 'script.painless.regex.enabled', 'true' + setting 'path.repo', "${buildDir}/cluster/shared/repo" Closure configFile = { extraConfigFile it, file("src/test/cluster/config/$it") } @@ -1185,3 +1186,13 @@ buildRestTests.setups['logdata_job'] = buildRestTests.setups['setup_logdata'] + } } ''' +// Used by snapshot lifecycle management docs +buildRestTests.setups['setup-repository'] = ''' + - do: + snapshot.create_repository: + repository: my_repository + body: + type: fs + settings: + location: buildDir/cluster/shared/repo +''' diff --git a/docs/java-rest/high-level/dataframe/delete_data_frame.asciidoc b/docs/java-rest/high-level/dataframe/delete_data_frame.asciidoc index a60c98ce37b84..c90795c71fe07 100644 --- a/docs/java-rest/high-level/dataframe/delete_data_frame.asciidoc +++ b/docs/java-rest/high-level/dataframe/delete_data_frame.asciidoc @@ -16,6 +16,9 @@ A 
+{request}+ object requires a non-null `id`. include-tagged::{doc-tests-file}[{api}-request] --------------------------------------------------- <1> Constructing a new request referencing an existing {dataframe-transform} +<2> Sets the optional argument `force`. When `true`, the {dataframe-transform} +is deleted regardless of its current state. The default value is `false`, +meaning that only `stopped` {dataframe-transforms} can be deleted. include::../execution.asciidoc[] diff --git a/docs/java-rest/high-level/dataframe/put_data_frame.asciidoc b/docs/java-rest/high-level/dataframe/put_data_frame.asciidoc index 19c7fe443dbcd..50362d2fc4a07 100644 --- a/docs/java-rest/high-level/dataframe/put_data_frame.asciidoc +++ b/docs/java-rest/high-level/dataframe/put_data_frame.asciidoc @@ -19,7 +19,11 @@ A +{request}+ requires the following argument: -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-request] -------------------------------------------------- -<1> The configuration of the {dataframe-job} to create +<1> The configuration of the {dataframe-transform} to create +<2> Whether or not to defer running deferrable validations until `_start` is called. +This option should be used with care as the created {dataframe-transform} will run +with the privileges of the user creating it. This means that if the user lacks the +required privileges, such errors will not surface until `_start` is called. [id="{upid}-{api}-config"] ==== Data Frame Transform Configuration @@ -34,8 +38,9 @@ include-tagged::{doc-tests-file}[{api}-config] <1> The {dataframe-transform} ID <2> The source indices and query from which to gather data <3> The destination index and optional pipeline -<4> The PivotConfig -<5> Optional free text description of the transform +<4> How often to check for updates to the source indices +<5> The PivotConfig +<6> Optional free text description of the transform [id="{upid}-{api}-query-config"] diff --git a/docs/java-rest/high-level/document/multi-get.asciidoc b/docs/java-rest/high-level/document/multi-get.asciidoc index ca26139d23057..1cf487dec1399 100644 --- a/docs/java-rest/high-level/document/multi-get.asciidoc +++ b/docs/java-rest/high-level/document/multi-get.asciidoc @@ -65,7 +65,7 @@ include-tagged::{doc-tests-file}[{api}-request-item-extras] <2> Version <3> Version type -{ref}/search-request-preference.html[`preference`], +{ref}/search-request-body.html#request-body-search-preference[`preference`], {ref}/docs-get.html#realtime[`realtime`] and {ref}/docs-get.html#get-refresh[`refresh`] can be set on the main request but diff --git a/docs/java-rest/high-level/ilm/delete_snapshot_lifecycle_policy.asciidoc b/docs/java-rest/high-level/ilm/delete_snapshot_lifecycle_policy.asciidoc new file mode 100644 index 0000000000000..66819d06187b7 --- /dev/null +++ b/docs/java-rest/high-level/ilm/delete_snapshot_lifecycle_policy.asciidoc @@ -0,0 +1,36 @@ +-- +:api: slm-delete-snapshot-lifecycle-policy +:request: DeleteSnapshotLifecyclePolicyRequest +:response: AcknowledgedResponse +-- + +[id="{upid}-{api}"] +=== Delete Snapshot Lifecycle Policy API + + +[id="{upid}-{api}-request"] +==== Request + +The Delete Snapshot Lifecycle Policy API allows you to delete a Snapshot Lifecycle Management Policy +from the cluster. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- +<1> The policy with the id `policy_id` will be deleted. 
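For orientation, below is a minimal sketch of how such a delete call might look when written out against the high-level REST client. It assumes an already-configured `RestHighLevelClient` named `client`, that the SLM request classes live in `org.elasticsearch.client.slm`, and that this release exposes the SLM calls through `client.indexLifecycle()` with method names mirroring the request names; none of this is confirmed by the tagged snippet above.

["source","java"]
--------------------------------------------------
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.core.AcknowledgedResponse;
import org.elasticsearch.client.slm.DeleteSnapshotLifecyclePolicyRequest; // package assumed

public class DeleteSlmPolicySketch {
    // Sketch only: deletes the SLM policy with the given id and reports
    // whether the cluster acknowledged the request.
    static boolean deletePolicy(RestHighLevelClient client, String policyId) throws Exception {
        DeleteSnapshotLifecyclePolicyRequest request = new DeleteSnapshotLifecyclePolicyRequest(policyId);
        AcknowledgedResponse response =
            client.indexLifecycle().deleteSnapshotLifecyclePolicy(request, RequestOptions.DEFAULT); // entry point assumed
        return response.isAcknowledged();
    }
}
--------------------------------------------------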
+ +[id="{upid}-{api}-response"] +==== Response + +The returned +{response}+ indicates if the delete snapshot lifecycle policy request was received. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> Whether or not the delete snapshot lifecycle policy request was acknowledged. + +include::../execution.asciidoc[] + + diff --git a/docs/java-rest/high-level/ilm/execute_snapshot_lifecycle_policy.asciidoc b/docs/java-rest/high-level/ilm/execute_snapshot_lifecycle_policy.asciidoc new file mode 100644 index 0000000000000..7b3af935a27c7 --- /dev/null +++ b/docs/java-rest/high-level/ilm/execute_snapshot_lifecycle_policy.asciidoc @@ -0,0 +1,36 @@ +-- +:api: slm-execute-snapshot-lifecycle-policy +:request: ExecuteSnapshotLifecyclePolicyRequest +:response: ExecuteSnapshotLifecyclePolicyResponse +-- + +[id="{upid}-{api}"] +=== Execute Snapshot Lifecycle Policy API + + +[id="{upid}-{api}-request"] +==== Request + +The Execute Snapshot Lifecycle Policy API allows you to execute a Snapshot Lifecycle Management +Policy, taking a snapshot immediately. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- +<1> The policy id to execute + +[id="{upid}-{api}-response"] +==== Response + +The returned +{response}+ contains the name of the snapshot that was created. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> The created snapshot name + +include::../execution.asciidoc[] + + diff --git a/docs/java-rest/high-level/ilm/get_snapshot_lifecycle_policy.asciidoc b/docs/java-rest/high-level/ilm/get_snapshot_lifecycle_policy.asciidoc new file mode 100644 index 0000000000000..eaa8af7969ee0 --- /dev/null +++ b/docs/java-rest/high-level/ilm/get_snapshot_lifecycle_policy.asciidoc @@ -0,0 +1,39 @@ +-- +:api: slm-get-snapshot-lifecycle-policy +:request: GetSnapshotLifecyclePolicyRequest +:response: GetSnapshotLifecyclePolicyResponse +-- + +[id="{upid}-{api}"] +=== Get Snapshot Lifecycle Policy API + + +[id="{upid}-{api}-request"] +==== Request + +The Get Snapshot Lifecycle Policy API allows you to retrieve the definition of a Snapshot Lifecycle +Management Policy from the cluster. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- +<1> Gets all policies. +<2> Gets `policy_id` + +[id="{upid}-{api}-response"] +==== Response + +The returned +{response}+ contains a map of `SnapshotLifecyclePolicyMetadata`, keyed by the id +of the policy; each entry contains metadata about the policy as well as the policy definition. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> The policies are retrieved by id. +<2> The policy definition itself. 
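Similarly, here is a hedged sketch of executing a policy on demand and reading back the generated snapshot name, under the same assumptions about the `client.indexLifecycle()` entry point and the `org.elasticsearch.client.slm` package; the `getSnapshotName()` accessor is inferred from the callout above rather than confirmed:

["source","java"]
--------------------------------------------------
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.slm.ExecuteSnapshotLifecyclePolicyRequest;  // package assumed
import org.elasticsearch.client.slm.ExecuteSnapshotLifecyclePolicyResponse; // package assumed

public class ExecuteSlmPolicySketch {
    // Sketch only: triggers an immediate snapshot for the given policy id
    // and returns the name of the snapshot that was started.
    static String takeSnapshotNow(RestHighLevelClient client, String policyId) throws Exception {
        ExecuteSnapshotLifecyclePolicyRequest request = new ExecuteSnapshotLifecyclePolicyRequest(policyId);
        ExecuteSnapshotLifecyclePolicyResponse response =
            client.indexLifecycle().executeSnapshotLifecyclePolicy(request, RequestOptions.DEFAULT); // entry point assumed
        return response.getSnapshotName(); // accessor name assumed
    }
}
--------------------------------------------------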
+ +include::../execution.asciidoc[] + + diff --git a/docs/java-rest/high-level/ilm/put_snapshot_lifecycle_policy.asciidoc b/docs/java-rest/high-level/ilm/put_snapshot_lifecycle_policy.asciidoc new file mode 100644 index 0000000000000..7fe7fec26c3b7 --- /dev/null +++ b/docs/java-rest/high-level/ilm/put_snapshot_lifecycle_policy.asciidoc @@ -0,0 +1,35 @@ +-- +:api: slm-put-snapshot-lifecycle-policy +:request: PutSnapshotLifecyclePolicyRequest +:response: AcknowledgedResponse +-- + +[id="{upid}-{api}"] +=== Put Snapshot Lifecycle Policy API + + +[id="{upid}-{api}-request"] +==== Request + +The Put Snapshot Lifecycle Policy API allows you to add or update the definition of a Snapshot +Lifecycle Management Policy in the cluster. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- + +[id="{upid}-{api}-response"] +==== Response + +The returned +{response}+ indicates if the put snapshot lifecycle policy request was received. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> Whether or not the put snapshot lifecycle policy was acknowledged. + +include::../execution.asciidoc[] + + diff --git a/docs/java-rest/high-level/search/search.asciidoc b/docs/java-rest/high-level/search/search.asciidoc index 1c454ce6ba14e..2ec7c3122b7a9 100644 --- a/docs/java-rest/high-level/search/search.asciidoc +++ b/docs/java-rest/high-level/search/search.asciidoc @@ -430,7 +430,7 @@ include-tagged::{doc-tests-file}[{api}-request-profiling-queries-results] <3> Retrieve the time in millis spent executing the Lucene query <4> Retrieve the profile results for the sub-queries (if any) -The Rest API documentation contains more information about {ref}/search-profile-queries.html[Profiling Queries] with +The Rest API documentation contains more information about {ref}/search-profile.html#profiling-queries[Profiling Queries] with a description of the query profiling information. The `QueryProfileShardResult` also gives access to the profiling information for the Lucene collectors: @@ -445,7 +445,7 @@ include-tagged::{doc-tests-file}[{api}-request-profiling-queries-collectors] <4> Retrieve the profile results for the sub-collectors (if any) The Rest API documentation contains more information about profiling information -for Lucene collectors. See {ref}/search-profile-queries.html[Profiling queries]. +for Lucene collectors. See {ref}/search-profile.html#profiling-queries[Profiling queries]. In a very similar manner to the query tree execution, the `QueryProfileShardResult` object gives access to the detailed aggregations tree execution: @@ -461,4 +461,4 @@ include-tagged::{doc-tests-file}[{api}-request-profiling-aggs] <5> Retrieve the profile results for the sub-aggregations (if any) The Rest API documentation contains more information about -{ref}/search-profile-aggregations.html[Profiling aggregations]. +{ref}/search-profile.html#profiling-aggregations[Profiling aggregations]. diff --git a/docs/painless/painless-contexts.asciidoc b/docs/painless/painless-contexts.asciidoc index ccc9e3ac4db24..3bb3fc7d97e29 100644 --- a/docs/painless/painless-contexts.asciidoc +++ b/docs/painless/painless-contexts.asciidoc @@ -23,7 +23,7 @@ specialized code may define new ways to use a Painless script. 
| Reindex | <> | {ref}/docs-reindex.html[Elasticsearch Documentation] | Sort | <> - | {ref}/search-request-sort.html[Elasticsearch Documentation] + | {ref}/search-request-body.html#request-body-search-sort[Elasticsearch Documentation] | Similarity | <> | {ref}/index-modules-similarity.html[Elasticsearch Documentation] | Weight | <> @@ -31,7 +31,7 @@ specialized code may define new ways to use a Painless script. | Score | <> | {ref}/query-dsl-function-score-query.html[Elasticsearch Documentation] | Field | <> - | {ref}/search-request-script-fields.html[Elasticsearch Documentation] + | {ref}/search-request-body.html#request-body-search-script-fields[Elasticsearch Documentation] | Filter | <> | {ref}/query-dsl-script-query.html[Elasticsearch Documentation] | Minimum should match | <> diff --git a/docs/painless/painless-contexts/painless-field-context.asciidoc b/docs/painless/painless-contexts/painless-field-context.asciidoc index 5a95e88c68460..3a3e9fe0adb96 100644 --- a/docs/painless/painless-contexts/painless-field-context.asciidoc +++ b/docs/painless/painless-contexts/painless-field-context.asciidoc @@ -2,7 +2,7 @@ === Field context Use a Painless script to create a -{ref}/search-request-script-fields.html[script field] to return +{ref}/search-request-body.html#request-body-search-script-fields[script field] to return a customized value for each document in the results of a query. *Variables* diff --git a/docs/painless/painless-contexts/painless-sort-context.asciidoc b/docs/painless/painless-contexts/painless-sort-context.asciidoc index 4a7743dc48800..74bce44c800f8 100644 --- a/docs/painless/painless-contexts/painless-sort-context.asciidoc +++ b/docs/painless/painless-contexts/painless-sort-context.asciidoc @@ -2,7 +2,7 @@ === Sort context Use a Painless script to -{ref}/search-request-sort.html[sort] the documents in a query. +{ref}/search-request-body.html#request-body-search-sort[sort] the documents in a query. *Variables* diff --git a/docs/plugins/discovery-ec2.asciidoc b/docs/plugins/discovery-ec2.asciidoc index fd51bd881daf0..9808aff41ddca 100644 --- a/docs/plugins/discovery-ec2.asciidoc +++ b/docs/plugins/discovery-ec2.asciidoc @@ -181,8 +181,8 @@ discovery. For instance, the following settings will only use nodes with a [source,yaml] ---- -discovery.ec2.tags.role: master -discovery.ec2.tags.environment: dev,staging +discovery.ec2.tag.role: master +discovery.ec2.tag.environment: dev,staging ---- NOTE: The names of tags used for discovery may only contain ASCII letters, diff --git a/docs/plugins/repository-s3.asciidoc b/docs/plugins/repository-s3.asciidoc index 48b03f1abc156..967b7f71c3ed1 100644 --- a/docs/plugins/repository-s3.asciidoc +++ b/docs/plugins/repository-s3.asciidoc @@ -62,13 +62,28 @@ with the exception of the secure settings, which you add to the {es} keystore. For more information about creating and updating the {es} keystore, see {ref}/secure-settings.html[Secure settings]. 
-For example, before you start the node, run these commands to add AWS access key -settings to the keystore: +For example, if you want to use specific credentials to access S3 then run the +following commands to add these credentials to the keystore: [source,sh] ---- bin/elasticsearch-keystore add s3.client.default.access_key bin/elasticsearch-keystore add s3.client.default.secret_key +# a session token is optional so the following command may not be needed +bin/elasticsearch-keystore add s3.client.default.session_token +---- + +If instead you want to use the instance role or container role to access S3 +then you should leave these settings unset. You can switch from using specific +credentials back to the default of using the instance role or container role by +removing these settings from the keystore as follows: + +[source,sh] +---- +bin/elasticsearch-keystore remove s3.client.default.access_key +bin/elasticsearch-keystore remove s3.client.default.secret_key +# a session token is optional so the following command may not be needed +bin/elasticsearch-keystore remove s3.client.default.session_token ---- *All* client secure settings of this plugin are @@ -88,16 +103,17 @@ settings belong in the `elasticsearch.yml` file. `access_key` ({ref}/secure-settings.html[Secure]):: - An S3 access key. The `secret_key` setting must also be specified. + An S3 access key. If set, the `secret_key` setting must also be specified. + If unset, the client will use the instance or container role instead. `secret_key` ({ref}/secure-settings.html[Secure]):: - An S3 secret key. The `access_key` setting must also be specified. + An S3 secret key. If set, the `access_key` setting must also be specified. `session_token` ({ref}/secure-settings.html[Secure]):: - An S3 session token. The `access_key` and `secret_key` settings must also be - specified. + An S3 session token. If set, the `access_key` and `secret_key` settings + must also be specified. `endpoint`:: @@ -159,6 +175,17 @@ https://aws.amazon.com/blogs/aws/amazon-s3-path-deprecation-plan-the-rest-of-the path style access pattern. If your deployment requires the path style access pattern then you should set this setting to `true` when upgrading. +`disable_chunked_encoding`:: + + Whether chunked encoding should be disabled or not. If `false`, chunked + encoding is enabled and will be used where appropriate. If `true`, chunked + encoding is disabled and will not be used, which may mean that snapshot + operations consume more resources and take longer to complete. It should + only be set to `true` if you are using a storage service that does not + support chunked encoding. See the + https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/s3/AmazonS3Builder.html#disableChunkedEncoding--[AWS + Java SDK documentation] for details. Defaults to `false`. + [float] [[repository-s3-compatible-services]] ===== S3-compatible services diff --git a/docs/reference/administering.asciidoc b/docs/reference/administering.asciidoc index 7170185c6ef77..0a3901cf7ed30 100644 --- a/docs/reference/administering.asciidoc +++ b/docs/reference/administering.asciidoc @@ -9,17 +9,4 @@ cluster. -- -[[backup-cluster]] -== Back up a cluster - -As with any software that stores data, it is important to routinely back up your -data. {es} replicas provide high availability during runtime; they enable you to -tolerate sporadic node loss without an interruption of service. - -Replicas do not provide protection from catastrophic failure, however. 
For that, -you need a real backup of your cluster—a complete copy in case something goes +wrong. - -To back up your cluster, you can use the <>. - -include::{es-repo-dir}/modules/snapshots.asciidoc[tag=snapshot-intro] +include::administering/backup-cluster.asciidoc[] \ No newline at end of file diff --git a/docs/reference/administering/backup-and-restore-security-config.asciidoc b/docs/reference/administering/backup-and-restore-security-config.asciidoc new file mode 100644 index 0000000000000..847557db48610 --- /dev/null +++ b/docs/reference/administering/backup-and-restore-security-config.asciidoc @@ -0,0 +1,281 @@ +[role="xpack"] +[testenv="basic"] +[[security-backup]] +=== Back up a cluster's security configuration +++++ +Back up the security configuration +++++ + +Security configuration information resides in two places: +<> and +<>. + +[discrete] +[[backup-security-file-based-configuration]] +==== Back up file-based security configuration + +{es} {security-features} are configured using the <> inside the `elasticsearch.yml` and +`elasticsearch.keystore` files. In addition, there are several other +<> inside the same `ES_PATH_CONF` +directory. These files define roles, role mappings, and +<>. Some of the +settings specify file paths to security-sensitive data, such as TLS keys and +certificates for the HTTP client and inter-node communication, and private key files for +<>, <> and the +<> realms. All these are also stored inside +`ES_PATH_CONF`; the path settings are relative. + +IMPORTANT: The `elasticsearch.keystore`, TLS keys, and the SAML, OIDC, and Kerberos +realm private key files require confidentiality. This is crucial when files +are copied to the backup location, as this increases the surface for malicious +snooping. + +To back up all this configuration, you can use a <>, as described in the previous section. + +[NOTE] +==== + +* File backups must run on every cluster node. +* File backups will store non-security configuration as well. Backing up +only the {security-features} configuration is not supported. A backup is a +point-in-time record of the complete configuration. + +==== + +[discrete] +[[backup-security-index-configuration]] +==== Back up index-based security configuration + +{es} {security-features} store system configuration data inside a +dedicated index. This index is named `.security-6` in the {es} 6.x versions and +`.security-7` in the 7.x releases. The `.security` alias always points to the +appropriate index. This index contains the data which is not available in +configuration files and *cannot* be reliably backed up using standard +filesystem tools. This data describes: + +* the definition of users in the native realm (including hashed passwords) +* role definitions (defined via the <>) +* role mappings (defined via the + <>) +* application privileges +* API keys + +The `.security` index thus contains resources and definitions in addition to +configuration information. All of that information is required in a complete +{security-features} backup. + +Use the <> to back up +`.security`, as you would for any <>. +For convenience, here are the complete steps: + +. Create a repository that you can use to back up the `.security` index. +It is preferable to have a <> for +this special index. If you wish, you can also snapshot the system indices for other {stack} components to this repository. 
++ +-- +[source,js] +----------------------------------- +PUT /_snapshot/my_backup +{ + "type": "fs", + "settings": { + "location": "my_backup_location" + } +} +----------------------------------- +// CONSOLE + +The user calling this API must have the elevated `manage` cluster privilege to +prevent non-administrators from exfiltrating data. + +-- + +. Create a user and assign it only the built-in `snapshot_user` role. ++ +-- +The following example creates a new user `snapshot_user` in the +{stack-ov}/native-realm.html[native realm], but it is not important which +realm the user is a member of: + +[source,js] +-------------------------------------------------- +POST /_security/user/snapshot_user +{ + "password" : "secret", + "roles" : [ "snapshot_user" ] +} +-------------------------------------------------- +// CONSOLE +// TEST[skip:security is not enabled in this fixture] + +-- + +. Create incremental snapshots authorized as `snapshot_user`. ++ +-- +The following example shows how to use the create snapshot API to back up +the `.security` index to the `my_backup` repository: + +[source,js] +-------------------------------------------------- +PUT /_snapshot/my_backup/snapshot_1 +{ + "indices": ".security", + "include_global_state": true <1> +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +<1> This parameter value captures all the persistent settings stored in the +global cluster metadata as well as other configurations such as aliases and +stored scripts. Note that this includes non-security configuration and that it complements but does not replace the +<>. + +-- + +IMPORTANT: The index format is only compatible within a single major version, +and cannot be restored onto a version earlier than the version from which it +originated. For example, you can restore a security snapshot from 6.6.0 into a +6.7.0 cluster, but you cannot restore it to a cluster running {es} 6.5.0 or 7.0.0. + +[discrete] +[[backup-security-repos]] +===== Controlling access to the backup repository + +The snapshot of the security index will typically contain sensitive data such +as user names and password hashes. Because passwords are stored using +<>, the disclosure of a snapshot would +not automatically enable a third party to authenticate as one of your users or +use API keys. However, it would disclose confidential information. + +It is also important that you protect the integrity of these backups in case +you ever need to restore them. If a third party is able to modify the stored +backups, they may be able to install a back door that would grant access if the +snapshot is loaded into an {es} cluster. + +We recommend that you: + +* Snapshot the `.security` index in a dedicated repository, where read and write +access is strictly restricted and audited. +* If there are indications that the snapshot has been read, change the passwords +of the users in the native realm and revoke API keys. +* If there are indications that the snapshot has been tampered with, do not +restore it. There is currently no option for the restore process to detect +malicious tampering. + +[[restore-security-configuration]] +=== Restore a cluster's security configuration +++++ +Restore the security configuration +++++ + +NOTE: You can restore a snapshot of the `.security` index only if it was +created in a previous minor version in the same major version. The last minor +version of every major release can convert and read formats of the index for +both its major version and the next one. 
+ +When you restore security configuration, you have the option of doing a complete +restore of *all* configurations, including non-security ones, or of restoring only +the contents of the `.security` index. As described in +<>, the second option comprises only +resource-type configurations. The first option has the advantage of restoring +a cluster to a clearly defined state from a past point in time. The second option +touches only security configuration resources, but it does not completely restore +the {security-features}. + +To restore your security configuration from a backup, first make sure that the +repository holding `.security` snapshots is installed: + +[source,js] +-------------------------------------------------- +GET /_snapshot/my_backup +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +[source,js] +-------------------------------------------------- +GET /_snapshot/my_backup/snapshot_1 +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +Then log into one of the node hosts, navigate to the {es} installation directory, +and follow these steps: + +. Add a new user with the `superuser` built-in role to the +{stack-ov}/file-realm.html[file realm]. ++ +-- +For example, create a user named `restore_user`: +[source,shell] +-------------------------------------------------- +bin/elasticsearch-users useradd restore_user -p password -r superuser +-------------------------------------------------- +-- + +. Using the previously created user, delete the existing `.security-6` or +`.security-7` index. ++ +-- +[source,shell] +-------------------------------------------------- +curl -u restore_user -X DELETE "localhost:9200/.security-*" +-------------------------------------------------- +// NOTCONSOLE + +WARNING: After this step, any authentication that relies on the `.security` +index will not work. This means that all API calls that authenticate with +native or reserved users will fail, as will any user that relies on a native role. +The file realm user we created in the step above will continue to work +because it is not stored in the `.security` index and uses the built-in +`superuser` role. + +-- + +. Using the same user, restore the `.security` index from the snapshot. ++ +-- +[source,shell] +-------------------------------------------------- + curl -u restore_user -X POST "localhost:9200/_snapshot/my_backup/snapshot_1/_restore" -H 'Content-Type: application/json' -d' + { + "indices": ".security-*", + "include_global_state": true <1> + } + ' +-------------------------------------------------- +// NOTCONSOLE 
++ +-- +This entails a straight-up filesystem copy of the backed up configuration files, +overwriting the contents of `$ES_PATH_CONF`, and restarting the node. This +needs to be done on *every node*. Depending on the extent of the differences +between your current cluster configuration and the restored configuration, you +may not be able to perform a rolling restart. If you are performing a full +restore of your configuration directory, we recommend a full cluster restart as +the safest option. Alternatively, you may wish to restore your configuration +files to a separate location on disk and use file comparison tools to review +the differences between your existing configuration and the restored +configuration. +-- diff --git a/docs/reference/administering/backup-cluster-config.asciidoc b/docs/reference/administering/backup-cluster-config.asciidoc new file mode 100644 index 0000000000000..373ff48618de8 --- /dev/null +++ b/docs/reference/administering/backup-cluster-config.asciidoc @@ -0,0 +1,62 @@ +[[backup-cluster-configuration]] +=== Back up a cluster's configuration +++++ +Back up the cluster configuration +++++ + +In addition to backing up the data in a cluster, it is important to back up its configuration--especially when the cluster becomes large and difficult to +reconstruct. + +Configuration information resides in +<> on every cluster node. Sensitive +setting values, such as passwords for the {watcher} notification servers, are +specified inside a binary secure container, the +<> file. Some setting values are +file paths to the associated configuration data, such as the ingest geo ip +database. All these files are contained inside the `ES_PATH_CONF` directory. + +NOTE: All changes to configuration files are done by manually editing the files +or using command line utilities, but *not* through APIs. In practice, these +changes are infrequent after the initial setup. + +We recommend that you take regular (ideally, daily) backups of your {es} config +(`$ES_PATH_CONF`) directory using the file backup software of your choice. + +TIP: We recommend that you have a configuration management plan for these +configuration files. You may wish to check them into version control, or +provision them through your choice of configuration management tool. + +Some of these files may contain sensitive data such as passwords and TLS keys, +therefore you should investigate whether your backup software and/or storage +solution are able to encrypt this data. + +Some settings in configuration files might be overridden by +<>. You can capture these settings in +a *data* backup snapshot by specifying the `include_global_state: true` (default) +parameter for the snapshot API. Alternatively, you can extract these +configuration values in text format by using the +<>: + +[source,js] +-------------------------------------------------- +GET _cluster/settings?pretty&flat_settings&filter_path=persistent +-------------------------------------------------- +//CONSOLE +//TEST + +You can store the output of this as a file together with the rest of +the configuration files. + +[NOTE] +==== + +* Transient settings are not considered for backup. +* {es} {security-features} store configuration data such as role definitions and +API keys inside a dedicated special index. This "system" data +complements the <> configuration and should +be <>. +* Other {stack} components, like Kibana and {ml-cap}, store their configuration +data inside other dedicated indices. 
From the {es} perspective these are just data +so you can use the regular <> process. + +==== diff --git a/docs/reference/administering/backup-cluster-data.asciidoc b/docs/reference/administering/backup-cluster-data.asciidoc new file mode 100644 index 0000000000000..063018337d666 --- /dev/null +++ b/docs/reference/administering/backup-cluster-data.asciidoc @@ -0,0 +1,35 @@ +[[backup-cluster-data]] +=== Back up a cluster's data +++++ +Back up the data +++++ + +As with any software that stores data, it is important to routinely back up your +data. {es} replicas provide high availability during runtime; they enable you to +tolerate sporadic node loss without an interruption of service. + +Replicas do not provide protection from catastrophic failure, however. For that, +you need a real backup of your cluster—a complete copy in case something goes +wrong. + +To back up your cluster's data, you can use the <>. + +include::{es-repo-dir}/modules/snapshots.asciidoc[tag=snapshot-intro] + +[TIP] +==== +If your cluster has {es} {security-features} enabled, when you back up your data +the snapshot API call must be authorized. + +The `snapshot_user` role is a reserved role that can be assigned to the user +who is calling the snapshot endpoint. This is the only role necessary if all the user +does is take periodic snapshots as part of the backup procedure. This role includes +the privileges to list all the existing snapshots (of any repository) as +well as list and view settings of all indices, including the `.security` index. +It does *not* grant privileges to create repositories, restore snapshots, or +search within indices. Hence, the user can view and snapshot all indices, but cannot +access or modify any data. + +For more information, see {stack-ov}/security-privileges.html[Security privileges] +and {stack-ov}/built-in-roles.html[Built-in roles]. +==== diff --git a/docs/reference/administering/backup-cluster.asciidoc b/docs/reference/administering/backup-cluster.asciidoc new file mode 100644 index 0000000000000..5544af65bc172 --- /dev/null +++ b/docs/reference/administering/backup-cluster.asciidoc @@ -0,0 +1,22 @@ +[[backup-cluster]] +== Back up a cluster + +include::{es-repo-dir}/modules/snapshots.asciidoc[tag=backup-warning] + +To have a complete backup for your cluster: + +. <> +. <> +. <> + +To restore your cluster from a backup: + +. <> +. <> + + + +include::backup-cluster-data.asciidoc[] +include::backup-cluster-config.asciidoc[] +include::backup-and-restore-security-config.asciidoc[] +include::restore-cluster-data.asciidoc[] diff --git a/docs/reference/administering/restore-cluster-data.asciidoc b/docs/reference/administering/restore-cluster-data.asciidoc new file mode 100644 index 0000000000000..c9ae6da339fd4 --- /dev/null +++ b/docs/reference/administering/restore-cluster-data.asciidoc @@ -0,0 +1,15 @@ +[[restore-cluster-data]] +=== Restore a cluster's data +++++ +Restore the data +++++ + +include::{es-repo-dir}/modules/snapshots.asciidoc[tag=restore-intro] + +[TIP] +==== +If your cluster has {es} {security-features} enabled, the restore API requires the `manage` cluster privilege. There is no bespoke role for the restore process. This privilege is very permissive and should only +be granted to users in the "administrator" category. Specifically, it allows +malicious users to exfiltrate data to a location of their choosing. Automated +tools should not run as users with this privilege. 
+==== diff --git a/docs/reference/aggregations/bucket/composite-aggregation.asciidoc b/docs/reference/aggregations/bucket/composite-aggregation.asciidoc index 45ab691604842..44a0ac79929c1 100644 --- a/docs/reference/aggregations/bucket/composite-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/composite-aggregation.asciidoc @@ -6,7 +6,7 @@ A multi-bucket aggregation that creates composite buckets from different sources Unlike the other `multi-bucket` aggregation the `composite` aggregation can be used to paginate **all** buckets from a multi-level aggregation efficiently. This aggregation provides a way to stream **all** buckets of a specific aggregation similarly to what -<> does for documents. +<> does for documents. The composite buckets are built from the combinations of the values extracted/created for each document and each combination is considered as @@ -590,3 +590,13 @@ GET /_search } -------------------------------------------------- // TESTRESPONSE[s/\.\.\.//] + +==== Pipeline aggregations + +The composite agg is not currently compatible with pipeline aggregations, nor does it make sense in most cases. +For example, due to the paging nature of composite aggs, a single logical partition (one day for example) might be spread +over multiple pages. Since pipeline aggregations are purely post-processing on the final list of buckets, +running something like a derivative on a composite page could lead to inaccurate results as it is only taking into +account a "partial" result on that page. + +Pipeline aggs that are self-contained to a single bucket (such as `bucket_selector`) might be supported in the future. diff --git a/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc b/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc index 1ea38fec9657b..29949bd3582c4 100644 --- a/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc @@ -17,15 +17,15 @@ One or more bucket aggregators determines by which properties a result set get s The top_hits aggregation returns regular search hits, because of this many per hit features can be supported: -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> ==== Example diff --git a/docs/reference/aggregations/pipeline/bucket-sort-aggregation.asciidoc b/docs/reference/aggregations/pipeline/bucket-sort-aggregation.asciidoc index d219e005d75d0..a11da760edd02 100644 --- a/docs/reference/aggregations/pipeline/bucket-sort-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/bucket-sort-aggregation.asciidoc @@ -38,7 +38,7 @@ is ascending. [options="header"] |=== |Parameter Name |Description |Required |Default Value -|`sort` |The list of fields to sort on. See <> for more details. |Optional | +|`sort` |The list of fields to sort on. See <> for more details. |Optional | |`from` |Buckets in positions prior to the set value will be truncated. |Optional | `0` |`size` |The number of buckets to return. Defaults to all buckets of the parent aggregation. 
|Optional | |`gap_policy` |The policy to apply when gaps are found in the data (see <> for more diff --git a/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc index 2e3ae8b036c3f..94f5930e88cfa 100644 --- a/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc +++ b/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc @@ -10,7 +10,7 @@ Edge N-Grams are useful for _search-as-you-type_ queries. TIP: When you need _search-as-you-type_ for text which has a widely known order, such as movie or song titles, the -<> is a much more efficient +<> is a much more efficient choice than edge N-grams. Edge N-grams have the advantage when trying to autocomplete words that can appear in any order. diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc index b46bd21a32bb1..c516f0e92c0c4 100644 --- a/docs/reference/api-conventions.asciidoc +++ b/docs/reference/api-conventions.asciidoc @@ -1,8 +1,6 @@ [[api-conventions]] -= API conventions +== API conventions -[partintro] --- The *Elasticsearch* REST APIs are exposed using <>. The conventions listed in this chapter can be applied throughout the REST @@ -13,10 +11,8 @@ API, unless otherwise specified. * <> * <> --- - [[multi-index]] -== Multiple Indices +=== Multiple Indices Most APIs that refer to an `index` parameter support execution across multiple indices, using simple `test1,test2,test3` notation (or `_all` for all indices). It also @@ -59,7 +55,7 @@ NOTE: Single index APIs such as the <> and the <> do not support multiple indices. [[date-math-index-names]] -== Date math support in index names +=== Date math support in index names Date math index name resolution enables you to search a range of time-series indices, rather than searching all of your time-series indices and filtering the results or maintaining aliases. @@ -168,12 +164,12 @@ GET /%3Clogstash-%7Bnow%2Fd-2d%7D%3E%2C%3Clogstash-%7Bnow%2Fd-1d%7D%3E%2C%3Clogs // TEST[s/now/2016.09.20||/] [[common-options]] -== Common options +=== Common options The following options can be applied to all of the REST APIs. [float] -=== Pretty Results +==== Pretty Results When appending `?pretty=true` to any request made, the JSON returned will be pretty formatted (use it for debugging only!). Another option is @@ -182,7 +178,7 @@ to set `?format=yaml` which will cause the result to be returned in the [float] -=== Human readable output +==== Human readable output Statistics are returned in a format suitable for humans (e.g. `"exists_time": "1h"` or `"size": "1kb"`) and for computers @@ -195,7 +191,7 @@ consumption. The default for the `human` flag is [[date-math]] [float] -=== Date Math +==== Date Math Most parameters which accept a formatted date value -- such as `gt` and `lt` in <>, or `from` and `to` @@ -233,7 +229,7 @@ Assuming `now` is `2001-01-01 12:00:00`, some examples are: [float] [[common-options-response-filtering]] -=== Response Filtering +==== Response Filtering All REST APIs accept a `filter_path` parameter that can be used to reduce the response returned by Elasticsearch. This parameter takes a comma @@ -400,7 +396,7 @@ GET /_search?filter_path=hits.hits._source&_source=title&sort=rating:desc [float] -=== Flat Settings +==== Flat Settings The `flat_settings` flag affects rendering of the lists of settings. 
When the `flat_settings` flag is `true`, settings are returned in a flat format: @@ -471,27 +467,27 @@ Returns: By default `flat_settings` is set to `false`. [float] -=== Parameters +==== Parameters Rest parameters (when using HTTP, map to HTTP URL parameters) follow the convention of using underscore casing. [float] -=== Boolean Values +==== Boolean Values All REST API parameters (both request parameters and JSON body) support providing boolean "false" as the value `false` and boolean "true" as the value `true`. All other values will raise an error. [float] -=== Number Values +==== Number Values All REST APIs support providing numbered parameters as `string` on top of supporting the native JSON number types. [[time-units]] [float] -=== Time units +==== Time units Whenever durations need to be specified, e.g. for a `timeout` parameter, the duration must specify the unit, like `2d` for 2 days. The supported units are: @@ -507,7 +503,7 @@ the unit, like `2d` for 2 days. The supported units are: [[byte-units]] [float] -=== Byte size units +==== Byte size units Whenever the byte size of data needs to be specified, e.g. when setting a buffer size parameter, the value must specify the unit, like `10kb` for 10 kilobytes. Note that @@ -523,7 +519,7 @@ these units use powers of 1024, so `1kb` means 1024 bytes. The supported units a [[size-units]] [float] -=== Unit-less quantities +==== Unit-less quantities Unit-less quantities means that they don't have a "unit" like "bytes" or "Hertz" or "meter" or "long tonne". @@ -539,7 +535,7 @@ when we mean 87 though. These are the supported multipliers: [[distance-units]] [float] -=== Distance Units +==== Distance Units Wherever distances need to be specified, such as the `distance` parameter in the <>), the default unit is meters if none is specified. @@ -561,7 +557,7 @@ Nautical mile:: `NM`, `nmi`, or `nauticalmiles` [[fuzziness]] [float] -=== Fuzziness +==== Fuzziness Some queries and APIs support parameters to allow inexact _fuzzy_ matching, using the `fuzziness` parameter. @@ -594,7 +590,7 @@ the default values are 3 and 6, equivalent to `AUTO:3,6` that make for lengths: [float] [[common-options-error-options]] -=== Enabling stack traces +==== Enabling stack traces By default when a request returns an error Elasticsearch doesn't include the stack trace of the error. You can enable that behavior by setting the @@ -672,7 +668,7 @@ The response looks like: // TESTRESPONSE[s/"stack_trace": "java.lang.Number.+\.\.\."/"stack_trace": $body.error.caused_by.stack_trace/] [float] -=== Request body in query string +==== Request body in query string For libraries that don't accept a request body for non-POST requests, you can pass the request body as the `source` query string parameter @@ -681,7 +677,7 @@ should also be passed with a media type value that indicates the format of the source, such as `application/json`. [float] -=== Content-Type Requirements +==== Content-Type Requirements The type of the content sent in a request body must be specified using the `Content-Type` header. The value of this header must map to one of @@ -694,7 +690,7 @@ content type must be specified using the `source_content_type` query string parameter. [[url-access-control]] -== URL-based access control +=== URL-based access control Many users use a proxy with URL-based access control to secure access to Elasticsearch indices. 
For <>, diff --git a/docs/reference/cat.asciidoc b/docs/reference/cat.asciidoc index 743bae7b85679..20e6a53a61284 100644 --- a/docs/reference/cat.asciidoc +++ b/docs/reference/cat.asciidoc @@ -1,11 +1,8 @@ [[cat]] -= cat APIs - -[partintro] --- +== cat APIs ["float",id="intro"] -== Introduction +=== Introduction JSON is great... for computers. Even if it's pretty-printed, trying to find relationships in the data is tedious. Human eyes, especially @@ -18,11 +15,11 @@ the available commands. [float] [[common-parameters]] -== Common parameters +=== Common parameters [float] [[verbose]] -=== Verbose +==== Verbose Each of the commands accepts a query string parameter `v` to turn on verbose output. For example: @@ -44,7 +41,7 @@ u_n93zwxThWHi1PDBJAGAg 127.0.0.1 127.0.0.1 u_n93zw [float] [[help]] -=== Help +==== Help Each of the commands accepts a query string parameter `help` which will output its available columns. For example: @@ -73,7 +70,7 @@ instead. [float] [[headers]] -=== Headers +==== Headers Each of the commands accepts a query string parameter `h` which forces only those columns to appear. For example: @@ -98,7 +95,7 @@ with `queue`. [float] [[numeric-formats]] -=== Numeric formats +==== Numeric formats Many commands provide a few types of numeric output, either a byte, size or a time value. By default, these types are human-formatted, @@ -129,7 +126,7 @@ If you want to change the <>, use `size` parameter. If you want to change the <>, use `bytes` parameter. [float] -=== Response as text, json, smile, yaml or cbor +==== Response as text, json, smile, yaml or cbor [source,sh] -------------------------------------------------- @@ -182,7 +179,7 @@ For example: [float] [[sort]] -=== Sort +==== Sort Each of the commands accepts a query string parameter `s` which sorts the table by the columns specified as the parameter value. Columns are specified either by name or by @@ -210,8 +207,6 @@ sushi_california_roll [*avocado*] 1 1 pizza_hawaiian [*pineapples*] 1 -------------------------------------------------- --- - include::cat/alias.asciidoc[] include::cat/allocation.asciidoc[] diff --git a/docs/reference/cat/alias.asciidoc b/docs/reference/cat/alias.asciidoc index a46a4c658d4b0..4fef1a85f9678 100644 --- a/docs/reference/cat/alias.asciidoc +++ b/docs/reference/cat/alias.asciidoc @@ -1,5 +1,5 @@ [[cat-alias]] -== cat aliases +=== cat aliases `aliases` shows information about currently configured aliases to indices including filter and routing infos. diff --git a/docs/reference/cat/allocation.asciidoc b/docs/reference/cat/allocation.asciidoc index 8d2c931665ba1..e1aeecb8c956d 100644 --- a/docs/reference/cat/allocation.asciidoc +++ b/docs/reference/cat/allocation.asciidoc @@ -1,5 +1,5 @@ [[cat-allocation]] -== cat allocation +=== cat allocation `allocation` provides a snapshot of how many shards are allocated to each data node and how much disk space they are using. diff --git a/docs/reference/cat/count.asciidoc b/docs/reference/cat/count.asciidoc index 01fec6df73180..e0b31fd1bbaac 100644 --- a/docs/reference/cat/count.asciidoc +++ b/docs/reference/cat/count.asciidoc @@ -1,5 +1,5 @@ [[cat-count]] -== cat count +=== cat count `count` provides quick access to the document count of the entire cluster, or individual indices. 
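For example, the count can be requested for the whole cluster or scoped to a single index. A minimal sketch, using a hypothetical `twitter` index:

[source,js]
--------------------------------------------------
GET /_cat/count?v
GET /_cat/count/twitter?v
--------------------------------------------------
// CONSOLE
// TEST[skip:sketch using a hypothetical index]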
diff --git a/docs/reference/cat/fielddata.asciidoc b/docs/reference/cat/fielddata.asciidoc index f94bcd6fe5d78..351f48b95d554 100644 --- a/docs/reference/cat/fielddata.asciidoc +++ b/docs/reference/cat/fielddata.asciidoc @@ -1,5 +1,5 @@ [[cat-fielddata]] -== cat fielddata +=== cat fielddata `fielddata` shows how much heap memory is currently being used by fielddata on every data node in the cluster. diff --git a/docs/reference/cat/health.asciidoc b/docs/reference/cat/health.asciidoc index 883119925fd5f..a6af320938d26 100644 --- a/docs/reference/cat/health.asciidoc +++ b/docs/reference/cat/health.asciidoc @@ -1,5 +1,5 @@ [[cat-health]] -== cat health +=== cat health `health` is a terse, one-line representation of the same information from `/_cluster/health`. @@ -75,7 +75,7 @@ static, we would have an idea that there is a problem. [float] [[timestamp]] -=== Why the timestamp? +==== Why the timestamp? You typically are using the `health` command when a cluster is malfunctioning. During this period, it's extremely important to diff --git a/docs/reference/cat/indices.asciidoc b/docs/reference/cat/indices.asciidoc index 653889dac8315..0adf08eb18d16 100644 --- a/docs/reference/cat/indices.asciidoc +++ b/docs/reference/cat/indices.asciidoc @@ -1,5 +1,5 @@ [[cat-indices]] -== cat indices +=== cat indices The `indices` command provides a cross-section of each index. This information *spans nodes*. For example: @@ -37,7 +37,7 @@ is to use either the <> or the <> [float] [[pri-flag]] -=== Primaries +==== Primaries The index stats by default will show them for all of an index's shards, including replicas. A `pri` flag can be supplied to enable @@ -45,7 +45,7 @@ the view of relevant stats in the context of only the primaries. [float] [[examples]] -=== Examples +==== Examples Which indices are yellow? diff --git a/docs/reference/cat/master.asciidoc b/docs/reference/cat/master.asciidoc index a38baa455caee..9eca62b3242d1 100644 --- a/docs/reference/cat/master.asciidoc +++ b/docs/reference/cat/master.asciidoc @@ -1,5 +1,5 @@ [[cat-master]] -== cat master +=== cat master `master` doesn't have any extra options. It simply displays the master's node ID, bound IP address, and node name. For example: diff --git a/docs/reference/cat/nodeattrs.asciidoc b/docs/reference/cat/nodeattrs.asciidoc index e5c335f7c375f..c688959876315 100644 --- a/docs/reference/cat/nodeattrs.asciidoc +++ b/docs/reference/cat/nodeattrs.asciidoc @@ -1,5 +1,5 @@ [[cat-nodeattrs]] -== cat nodeattrs +=== cat nodeattrs The `nodeattrs` command shows custom node attributes. For example: @@ -32,7 +32,7 @@ and the `attr` and `value` columns give you the custom node attributes, one per line. [float] -=== Columns +==== Columns Below is an exhaustive list of the existing headers that can be passed to `nodeattrs?h=` to retrieve the relevant details in ordered diff --git a/docs/reference/cat/nodes.asciidoc b/docs/reference/cat/nodes.asciidoc index a882f5f9fa398..387044e6a95e0 100644 --- a/docs/reference/cat/nodes.asciidoc +++ b/docs/reference/cat/nodes.asciidoc @@ -1,5 +1,5 @@ [[cat-nodes]] -== cat nodes +=== cat nodes The `nodes` command shows the cluster topology. For example @@ -33,7 +33,7 @@ requested with `id` or `nodeId`) in its full length or in abbreviated form (the default). 
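For instance, the following sketch asks for full-length node IDs together with a trimmed set of columns, using the `full_id` and `h` parameters:

[source,js]
--------------------------------------------------
GET /_cat/nodes?v&full_id=true&h=id,ip,name
--------------------------------------------------
// CONSOLE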
[float] -=== Columns +==== Columns Below is an exhaustive list of the existing headers that can be passed to `nodes?h=` to retrieve the relevant details in ordered diff --git a/docs/reference/cat/pending_tasks.asciidoc b/docs/reference/cat/pending_tasks.asciidoc index ec923f270c571..4fa96d784b8a3 100644 --- a/docs/reference/cat/pending_tasks.asciidoc +++ b/docs/reference/cat/pending_tasks.asciidoc @@ -1,5 +1,5 @@ [[cat-pending-tasks]] -== cat pending tasks +=== cat pending tasks `pending_tasks` provides the same information as the <> API in a diff --git a/docs/reference/cat/plugins.asciidoc b/docs/reference/cat/plugins.asciidoc index 9800b4fef0a64..6e9f4571698a6 100644 --- a/docs/reference/cat/plugins.asciidoc +++ b/docs/reference/cat/plugins.asciidoc @@ -1,5 +1,5 @@ [[cat-plugins]] -== cat plugins +=== cat plugins The `plugins` command provides a view per node of running plugins. This information *spans nodes*. diff --git a/docs/reference/cat/recovery.asciidoc b/docs/reference/cat/recovery.asciidoc index 1477dfb676f9f..f973b15dd1c96 100644 --- a/docs/reference/cat/recovery.asciidoc +++ b/docs/reference/cat/recovery.asciidoc @@ -1,5 +1,5 @@ [[cat-recovery]] -== cat recovery +=== cat recovery The `recovery` command is a view of index shard recoveries, both on-going and previously completed. It is a more compact view of the JSON <> API. diff --git a/docs/reference/cat/repositories.asciidoc b/docs/reference/cat/repositories.asciidoc index 89daf7748a5d4..a0a4263aa473b 100644 --- a/docs/reference/cat/repositories.asciidoc +++ b/docs/reference/cat/repositories.asciidoc @@ -1,5 +1,5 @@ [[cat-repositories]] -== cat repositories +=== cat repositories The `repositories` command shows the snapshot repositories registered in the cluster. For example: diff --git a/docs/reference/cat/segments.asciidoc b/docs/reference/cat/segments.asciidoc index 5fa2f66e384a0..bab89d6a67c5f 100644 --- a/docs/reference/cat/segments.asciidoc +++ b/docs/reference/cat/segments.asciidoc @@ -1,5 +1,5 @@ [[cat-segments]] -== cat segments +=== cat segments The `segments` command provides low level information about the segments in the shards of an index. It provides information similar to the diff --git a/docs/reference/cat/shards.asciidoc b/docs/reference/cat/shards.asciidoc index 345e493375400..f7bea1ab93c49 100644 --- a/docs/reference/cat/shards.asciidoc +++ b/docs/reference/cat/shards.asciidoc @@ -1,5 +1,5 @@ [[cat-shards]] -== cat shards +=== cat shards The `shards` command is the detailed view of what nodes contain which shards. It will tell you if it's a primary or replica, the number of @@ -27,7 +27,7 @@ twitter 0 p STARTED 3014 31.1mb 192.168.56.10 H5dfFeA [float] [[index-pattern]] -=== Index pattern +==== Index pattern If you have many shards, you may wish to limit which indices show up in the output. You can always do this with `grep`, but you can save @@ -54,7 +54,7 @@ twitter 0 p STARTED 3014 31.1mb 192.168.56.10 H5dfFeA [float] [[relocation]] -=== Relocation +==== Relocation Let's say you've checked your health and you see relocating shards. Where are they from and where are they going? @@ -76,7 +76,7 @@ twitter 0 p RELOCATING 3014 31.1mb 192.168.56.10 H5dfFeA -> -> 192.168.56.30 bGG [float] [[states]] -=== Shard states +==== Shard states Before a shard can be used, it goes through an `INITIALIZING` state. `shards` can show you which ones. 
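A sketch that narrows the output to the columns most relevant when tracking shard state, including the `unassigned.reason` column covered below:

[source,js]
--------------------------------------------------
GET _cat/shards?v&h=index,shard,prirep,state,unassigned.reason
--------------------------------------------------
// CONSOLE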
@@ -123,7 +123,7 @@ twitter 0 r UNASSIGNED ALLOCATION_FAILED [float] [[reason-unassigned]] -=== Reasons for unassigned shard +==== Reasons for unassigned shard These are the possible reasons for a shard to be in an unassigned state: diff --git a/docs/reference/cat/snapshots.asciidoc b/docs/reference/cat/snapshots.asciidoc index 37d57292e0488..ebe3f8322ef82 100644 --- a/docs/reference/cat/snapshots.asciidoc +++ b/docs/reference/cat/snapshots.asciidoc @@ -1,5 +1,5 @@ [[cat-snapshots]] -== cat snapshots +=== cat snapshots The `snapshots` command shows all snapshots that belong to a specific repository or multiple repositories. diff --git a/docs/reference/cat/templates.asciidoc b/docs/reference/cat/templates.asciidoc index 41d27cfbf0306..6a6a810c40438 100644 --- a/docs/reference/cat/templates.asciidoc +++ b/docs/reference/cat/templates.asciidoc @@ -1,5 +1,5 @@ [[cat-templates]] -== cat templates +=== cat templates The `templates` command provides information about existing templates. diff --git a/docs/reference/cat/thread_pool.asciidoc b/docs/reference/cat/thread_pool.asciidoc index f63abc8903650..5440bc4e3ac4e 100644 --- a/docs/reference/cat/thread_pool.asciidoc +++ b/docs/reference/cat/thread_pool.asciidoc @@ -1,5 +1,5 @@ [[cat-thread-pool]] -== cat thread pool +=== cat thread pool The `thread_pool` command shows cluster wide thread pool statistics per node. By default the active, queue and rejected statistics are returned for all thread pools. @@ -113,7 +113,7 @@ Here the host columns and the active, rejected and completed suggest thread pool All <> and custom thread pools are available. [float] -==== Thread Pool Fields +===== Thread Pool Fields For each thread pool, you can load details about it by using the field names in the table below. @@ -136,7 +136,7 @@ in the table below. |======================================================================= [float] -=== Other Fields +==== Other Fields In addition to details about each thread pool, it is also convenient to get an understanding of where those thread pools reside. As such, you can request diff --git a/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc index 84132d5a95ad4..ffe0e9e2fde92 100644 --- a/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc @@ -54,8 +54,8 @@ This API deletes a configured collection of [[ccr-delete-auto-follow-pattern-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Specifies the auto-follow pattern collection to delete. +``:: + (Required, string) Specifies the auto-follow pattern collection to delete. [[ccr-delete-auto-follow-pattern-examples]] diff --git a/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc index 25122ab5f3a8b..23e0cc665da03 100644 --- a/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc @@ -68,9 +68,9 @@ This API will return the specified auto-follow pattern collection. [[ccr-get-auto-follow-pattern-path-parms]] ==== {api-path-parms-title} -`` (Optional):: - (string) Specifies the auto-follow pattern collection that you want to - retrieve. If you do not specify a name, the API returns information for all +``:: + (Optional, string) Specifies the auto-follow pattern collection that you want + to retrieve.
If you do not specify a name, the API returns information for all collections. [[ccr-get-auto-follow-pattern-examples]] diff --git a/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc index 6df2f2c2f82e5..89e2799f8e966 100644 --- a/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc @@ -61,24 +61,24 @@ indices. [[ccr-put-auto-follow-pattern-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) The name of the collection of auto-follow patterns. +``:: + (Required, string) The name of the collection of auto-follow patterns. [[ccr-put-auto-follow-pattern-request-body]] ==== {api-request-body-title} -`remote_cluster` (Required):: - (string) The <> containing the - leader indices to match against. +`remote_cluster`:: + (Required, string) The <> containing + the leader indices to match against. -`leader_index_patterns` (Optional):: - (array) An array of simple index patterns to match against indices in the - remote cluster specified by the `remote_cluster` field. +`leader_index_patterns`:: + (Optional, array) An array of simple index patterns to match against indices + in the remote cluster specified by the `remote_cluster` field. -`follow_index_pattern` (Optional):: - (string) The name of follower index. The template `{{leader_index}}` can be - used to derive the name of the follower index from the name of the leader - index. +`follow_index_pattern`:: + (Optional, string) The name of the follower index. The template `{{leader_index}}` + can be used to derive the name of the follower index from the name of the + leader index. include::../follow-request-body.asciidoc[] diff --git a/docs/reference/ccr/apis/follow/get-follow-info.asciidoc b/docs/reference/ccr/apis/follow/get-follow-info.asciidoc index b226f60ec85d8..c1111af8404ff 100644 --- a/docs/reference/ccr/apis/follow/get-follow-info.asciidoc +++ b/docs/reference/ccr/apis/follow/get-follow-info.asciidoc @@ -58,8 +58,8 @@ replication options and whether the follower indices are active or paused. [[ccr-get-follow-info-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) A comma-delimited list of follower index patterns. +``:: + (Required, string) A comma-delimited list of follower index patterns. [[ccr-get-follow-info-response-body]] ==== {api-response-body-title} diff --git a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc index fe800a87c692a..87496b24d68ab 100644 --- a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc +++ b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc @@ -57,8 +57,8 @@ following tasks associated with each shard for the specified indices. [[ccr-get-follow-stats-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) A comma-delimited list of index patterns. +``:: + (Required, string) A comma-delimited list of index patterns. [[ccr-get-follow-stats-response-body]] ==== {api-response-body-title} diff --git a/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc b/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc index 979148adc5783..96c8349848efc 100644 --- a/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc +++ b/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc @@ -101,25 +101,25 @@ the <> is invoked.
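Taken together, a forget-follower request has the following shape. This is an illustrative sketch only: the index names, cluster aliases, and UUID are hypothetical, and the UUID must match the follower index's actual index UUID. The individual parameters are described below.

[source,js]
--------------------------------------------------
POST /leader_index/_ccr/forget_follower
{
  "follower_cluster" : "follower_cluster",
  "follower_index" : "follower_index",
  "follower_index_uuid" : "vYpnaWPRQB6mNspmoCeYyA",
  "leader_remote_cluster" : "leader_cluster"
}
--------------------------------------------------
// CONSOLE
// TEST[skip:sketch using hypothetical names]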
[[ccr-post-forget-follower-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) the name of the leader index +``:: + (Required, string) The name of the leader index. [[ccr-post-forget-follower-request-body]] ==== {api-request-body-title} -`follower_cluster` (Required):: - (string) The name of the cluster containing the follower index. +`follower_cluster`:: + (Required, string) The name of the cluster containing the follower index. -`follower_index` (Required):: - (string) The name of the follower index. +`follower_index`:: + (Required, string) The name of the follower index. -`follower_index_uuid` (Required):: - (string) The UUID of the follower index. +`follower_index_uuid`:: + (Required, string) The UUID of the follower index. -`leader_remote_cluster` (Required):: - (string) The alias (from the perspective of the cluster containing the - follower index) of the <> containing - the leader index. +`leader_remote_cluster`:: + (Required, string) The alias (from the perspective of the cluster containing + the follower index) of the <> + containing the leader index. [[ccr-post-forget-follower-examples]] ==== {api-examples-title} diff --git a/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc b/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc index b234719c606a0..f1926b8778db0 100644 --- a/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc @@ -53,8 +53,8 @@ following task. [[ccr-post-pause-follow-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) The name of the follower index. +``:: + (Required, string) The name of the follower index. [[ccr-post-pause-follow-examples]] ==== {api-examples-title} diff --git a/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc b/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc index 19c10b51f878a..ac39e4d285172 100644 --- a/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc @@ -67,8 +67,8 @@ returns, the follower index will resume fetching operations from the leader inde [[ccr-post-resume-follow-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) The name of the follower index. +``:: + (Required, string) The name of the follower index. [[ccr-post-resume-follow-request-body]] ==== {api-request-body-title} diff --git a/docs/reference/ccr/apis/follow/post-unfollow.asciidoc b/docs/reference/ccr/apis/follow/post-unfollow.asciidoc index 405a3547a78c4..d2b3391767bc4 100644 --- a/docs/reference/ccr/apis/follow/post-unfollow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-unfollow.asciidoc @@ -60,8 +60,8 @@ irreversible operation. [[ccr-post-unfollow-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) The name of the follower index. +``:: + (Required, string) The name of the follower index. [[ccr-post-unfollow-examples]] ==== {api-examples-title} diff --git a/docs/reference/ccr/apis/follow/put-follow.asciidoc b/docs/reference/ccr/apis/follow/put-follow.asciidoc index b54806815410c..7582b23757666 100644 --- a/docs/reference/ccr/apis/follow/put-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/put-follow.asciidoc @@ -56,14 +56,14 @@ referenced leader index. When this API returns, the follower index exists, and [[ccr-put-follow-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) The name of the follower index. +``:: + (Required, string) The name of the follower index. 
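For orientation, a create-follower request has the following overall shape. This is a sketch using hypothetical index and cluster names; the query parameter and request body fields are described below.

[source,js]
--------------------------------------------------
PUT /follower_index/_ccr/follow?wait_for_active_shards=1
{
  "remote_cluster" : "remote_cluster",
  "leader_index" : "leader_index"
}
--------------------------------------------------
// CONSOLE
// TEST[skip:sketch using hypothetical names]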
[[ccr-put-follow-query-params]] ==== {api-query-parms-title} -`wait_for_active_shards` (Optional):: - (integer) Specifies the number of shards to wait on being active before +`wait_for_active_shards`:: + (Optional, integer) Specifies the number of shards to wait on being active before responding. This defaults to waiting on none of the shards to be active. A shard must be restored from the leader index being active. Restoring a follower shard requires transferring all the remote Lucene segment files to @@ -73,12 +73,12 @@ referenced leader index. When this API returns, the follower index exists, and [[ccr-put-follow-request-body]] ==== {api-request-body-title} -`remote_cluster` (Required):: - (string) The <> containing the leader - index. +`leader_index`:: + (Required, string) The name of the index in the leader cluster to follow. -`leader_index` (Required):: - (string) The name of the index in the leader cluster to follow. +`remote_cluster`:: + (Required, string) The <> containing + the leader index. include::../follow-request-body.asciidoc[] diff --git a/docs/reference/cluster.asciidoc b/docs/reference/cluster.asciidoc index da5ce2b410281..81b0b2ae3d880 100644 --- a/docs/reference/cluster.asciidoc +++ b/docs/reference/cluster.asciidoc @@ -1,10 +1,8 @@ [[cluster]] -= Cluster APIs +== Cluster APIs -[partintro] --- ["float",id="cluster-nodes"] -== Node specification +=== Node specification Some cluster-level APIs may operate on a subset of the nodes which can be specified with _node filters_. For example, the <>, @@ -81,7 +79,6 @@ GET /_nodes/ra*:2 GET /_nodes/ra*:2* -------------------------------------------------- // CONSOLE --- include::cluster/health.asciidoc[] diff --git a/docs/reference/cluster/allocation-explain.asciidoc b/docs/reference/cluster/allocation-explain.asciidoc index 615a8a0108427..90ec04db60ae7 100644 --- a/docs/reference/cluster/allocation-explain.asciidoc +++ b/docs/reference/cluster/allocation-explain.asciidoc @@ -1,5 +1,5 @@ [[cluster-allocation-explain]] -== Cluster Allocation Explain API +=== Cluster Allocation Explain API The purpose of the cluster allocation explain API is to provide explanations for shard allocations in the cluster. For unassigned shards, @@ -11,7 +11,7 @@ a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise. [float] -=== Explain API Request +==== Explain API Request To explain the allocation of a shard, first an index should exist: @@ -68,7 +68,7 @@ GET /_cluster/allocation/explain // CONSOLE [float] -=== Explain API Response +==== Explain API Response This section includes examples of the cluster allocation explain API response output under various scenarios. diff --git a/docs/reference/cluster/get-settings.asciidoc b/docs/reference/cluster/get-settings.asciidoc index b6ea5db1f6dbb..ac5df7b7aa876 100644 --- a/docs/reference/cluster/get-settings.asciidoc +++ b/docs/reference/cluster/get-settings.asciidoc @@ -1,5 +1,5 @@ [[cluster-get-settings]] -== Cluster Get Settings +=== Cluster Get Settings The cluster get settings API allows to retrieve the cluster wide settings. diff --git a/docs/reference/cluster/health.asciidoc b/docs/reference/cluster/health.asciidoc index d75ce77d1af80..db745c86a0b38 100644 --- a/docs/reference/cluster/health.asciidoc +++ b/docs/reference/cluster/health.asciidoc @@ -1,5 +1,5 @@ [[cluster-health]] -== Cluster Health +=== Cluster Health The cluster health API allows to get a very simple status on the health of the cluster. 
For example, on a quiet single node cluster with a single index @@ -70,7 +70,7 @@ GET /_cluster/health?wait_for_status=yellow&timeout=50s [float] [[request-params]] -=== Request Parameters +==== Request Parameters The cluster health API accepts the following request parameters: diff --git a/docs/reference/cluster/nodes-hot-threads.asciidoc b/docs/reference/cluster/nodes-hot-threads.asciidoc index 541ee51a58adb..82994058e2042 100644 --- a/docs/reference/cluster/nodes-hot-threads.asciidoc +++ b/docs/reference/cluster/nodes-hot-threads.asciidoc @@ -1,5 +1,5 @@ [[cluster-nodes-hot-threads]] -== Nodes hot_threads +=== Nodes hot_threads This API yields a breakdown of the hot threads on each selected node in the cluster. Its endpoints are `/_nodes/hot_threads` and diff --git a/docs/reference/cluster/nodes-info.asciidoc b/docs/reference/cluster/nodes-info.asciidoc index c51167ca9f974..679ff51a695a0 100644 --- a/docs/reference/cluster/nodes-info.asciidoc +++ b/docs/reference/cluster/nodes-info.asciidoc @@ -1,5 +1,5 @@ [[cluster-nodes-info]] -== Nodes Info +=== Nodes Info The cluster nodes info API allows to retrieve one or more (or all) of the cluster nodes information. diff --git a/docs/reference/cluster/nodes-stats.asciidoc b/docs/reference/cluster/nodes-stats.asciidoc index bb24dffd40f7d..452d821bedd9d 100644 --- a/docs/reference/cluster/nodes-stats.asciidoc +++ b/docs/reference/cluster/nodes-stats.asciidoc @@ -1,8 +1,8 @@ [[cluster-nodes-stats]] -== Nodes Stats +=== Nodes Stats [float] -=== Nodes statistics +==== Nodes statistics The cluster nodes stats API allows to retrieve one or more (or all) of the cluster nodes statistics. @@ -85,7 +85,7 @@ All stats can be explicitly requested via `/_nodes/stats/_all` or `/_nodes/stats [float] [[fs-info]] -==== FS information +===== FS information The `fs` flag can be set to retrieve information that concern the file system: @@ -176,7 +176,7 @@ information that concern the file system: [float] [[os-stats]] -==== Operating System statistics +===== Operating System statistics The `os` flag can be set to retrieve statistics that concern the operating system: @@ -280,7 +280,7 @@ and `/sys/fs/cgroup/cpuacct`. [float] [[process-stats]] -==== Process statistics +===== Process statistics The `process` flag can be set to retrieve statistics that concern the current running process: @@ -305,7 +305,7 @@ the current running process: [float] [[node-indices-stats]] -=== Indices statistics +==== Indices statistics You can get information about indices stats on `node`, `indices`, or `shards` level. @@ -346,7 +346,7 @@ Supported metrics are: [float] [[search-groups]] -=== Search groups +==== Search groups You can get statistics about search groups for searches executed on this node. @@ -363,7 +363,7 @@ GET /_nodes/stats/indices?groups=foo,bar [float] [[ingest-stats]] -=== Ingest statistics +==== Ingest statistics The `ingest` flag can be set to retrieve statistics that concern ingest: @@ -383,7 +383,7 @@ On top of these overall ingest statistics, these statistics are also provided on [float] [[adaptive-selection-stats]] -=== Adaptive selection statistics +==== Adaptive selection statistics The `adaptive_selection` flag can be set to retrieve statistics that concern <>. 
These statistics are diff --git a/docs/reference/cluster/nodes-usage.asciidoc b/docs/reference/cluster/nodes-usage.asciidoc index 2d71e74959708..06473d3380fec 100644 --- a/docs/reference/cluster/nodes-usage.asciidoc +++ b/docs/reference/cluster/nodes-usage.asciidoc @@ -1,8 +1,8 @@ [[cluster-nodes-usage]] -== Nodes Feature Usage +=== Nodes Feature Usage [float] -=== Nodes usage +==== Nodes usage The cluster nodes usage API allows to retrieve information on the usage of features for each node. @@ -23,7 +23,7 @@ second command selectively retrieves nodes usage of only `nodeId1` and [float] [[rest-usage]] -==== REST actions usage information +===== REST actions usage information The `rest_actions` field in the response contains a map of the REST actions classname with a count of the number of times that action has diff --git a/docs/reference/cluster/pending.asciidoc b/docs/reference/cluster/pending.asciidoc index c64890cd31271..896744ce18deb 100644 --- a/docs/reference/cluster/pending.asciidoc +++ b/docs/reference/cluster/pending.asciidoc @@ -1,5 +1,5 @@ [[cluster-pending]] -== Pending cluster tasks +=== Pending cluster tasks The pending cluster tasks API returns a list of any cluster-level changes (e.g. create index, update mapping, allocate or fail shard) which have not yet diff --git a/docs/reference/cluster/remote-info.asciidoc b/docs/reference/cluster/remote-info.asciidoc index 4d2af7b6e8f01..fa550c22b75a7 100644 --- a/docs/reference/cluster/remote-info.asciidoc +++ b/docs/reference/cluster/remote-info.asciidoc @@ -1,5 +1,5 @@ [[cluster-remote-info]] -== Remote Cluster Info +=== Remote Cluster Info The cluster remote info API allows to retrieve all of the configured remote cluster information. diff --git a/docs/reference/cluster/reroute.asciidoc b/docs/reference/cluster/reroute.asciidoc index 276a43f660d8b..7ff608834075f 100644 --- a/docs/reference/cluster/reroute.asciidoc +++ b/docs/reference/cluster/reroute.asciidoc @@ -1,5 +1,5 @@ [[cluster-reroute]] -== Cluster Reroute +=== Cluster Reroute The reroute command allows for manual changes to the allocation of individual shards in the cluster. For example, a shard can be moved from one node to @@ -77,7 +77,7 @@ The commands supported are: <> into account. [float] -=== Retrying failed allocations +==== Retrying failed allocations The cluster will attempt to allocate a shard a maximum of `index.allocation.max_retries` times in a row (defaults to `5`), before giving @@ -90,7 +90,7 @@ calling the <> API with the `?retry_failed` URI query parameter, which will attempt a single retry round for these shards. [float] -=== Forced allocation on unrecoverable errors +==== Forced allocation on unrecoverable errors Two more commands are available that allow the allocation of a primary shard to a node. These commands should however be used with extreme care, as primary diff --git a/docs/reference/cluster/state.asciidoc b/docs/reference/cluster/state.asciidoc index 34bb69a552c66..69e497780f281 100644 --- a/docs/reference/cluster/state.asciidoc +++ b/docs/reference/cluster/state.asciidoc @@ -1,5 +1,5 @@ [[cluster-state]] -== Cluster State +=== Cluster State The cluster state API allows access to metadata representing the state of the whole cluster. This includes information such as @@ -39,7 +39,7 @@ retrieve the cluster state local to a particular node by adding `local=true` to the query string. 
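For example, a minimal sketch:

[source,js]
--------------------------------------------------
GET /_cluster/state?local=true
--------------------------------------------------
// CONSOLE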
[float] -=== Response Filters +==== Response Filters The cluster state contains information about all the indices in the cluster, including their mappings, as well as templates and other metadata. This means it diff --git a/docs/reference/cluster/stats.asciidoc b/docs/reference/cluster/stats.asciidoc index 259d8ae2bba11..e51c66880f6ea 100644 --- a/docs/reference/cluster/stats.asciidoc +++ b/docs/reference/cluster/stats.asciidoc @@ -1,5 +1,5 @@ [[cluster-stats]] -== Cluster Stats +=== Cluster Stats The Cluster Stats API allows to retrieve statistics from a cluster wide perspective. The API returns basic index metrics (shard numbers, store size, memory usage) and diff --git a/docs/reference/cluster/tasks.asciidoc b/docs/reference/cluster/tasks.asciidoc index b429edcd9dee5..9f2e6ee7e1ef4 100644 --- a/docs/reference/cluster/tasks.asciidoc +++ b/docs/reference/cluster/tasks.asciidoc @@ -1,10 +1,10 @@ [[tasks]] -== Task Management API +=== Task Management API beta[The Task Management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible] [float] -=== Current Tasks Information +==== Current Tasks Information The task management API allows to retrieve information about the tasks currently executing on one or more nodes in the cluster. @@ -177,7 +177,7 @@ GET _cat/tasks?detailed [float] [[task-cancellation]] -=== Task Cancellation +==== Task Cancellation If a long-running task supports cancellation, it can be cancelled with the cancel tasks API. The following example cancels task `oTUltX4IQMOUUVeiohTt8A:12345`: @@ -199,7 +199,7 @@ POST _tasks/_cancel?nodes=nodeId1,nodeId2&actions=*reindex // CONSOLE [float] -=== Task Grouping +==== Task Grouping The task lists returned by task API commands can be grouped either by nodes (default) or by parent tasks using the `group_by` parameter. The following command will change the grouping to parent tasks: @@ -219,7 +219,7 @@ GET _tasks?group_by=none // CONSOLE [float] -=== Identifying running tasks +==== Identifying running tasks The `X-Opaque-Id` header, when provided on the HTTP request header, is going to be returned as a header in the response as well as in the `headers` field for in the task information. This allows to track certain calls, or associate certain tasks with diff --git a/docs/reference/cluster/update-settings.asciidoc b/docs/reference/cluster/update-settings.asciidoc index d21d84ba8f230..ae5f3c2f7b75c 100644 --- a/docs/reference/cluster/update-settings.asciidoc +++ b/docs/reference/cluster/update-settings.asciidoc @@ -1,5 +1,5 @@ [[cluster-update-settings]] -== Cluster Update Settings +=== Cluster Update Settings Use this API to review and change cluster-wide settings. @@ -102,7 +102,7 @@ PUT /_cluster/settings [float] -=== Order of Precedence +==== Order of Precedence The order of precedence for cluster settings is: diff --git a/docs/reference/cluster/voting-exclusions.asciidoc b/docs/reference/cluster/voting-exclusions.asciidoc index fcef8113912c4..82ef64ee9d318 100644 --- a/docs/reference/cluster/voting-exclusions.asciidoc +++ b/docs/reference/cluster/voting-exclusions.asciidoc @@ -1,5 +1,5 @@ [[voting-config-exclusions]] -== Voting configuration exclusions API +=== Voting configuration exclusions API ++++ Voting Configuration Exclusions ++++ @@ -8,20 +8,20 @@ Adds or removes master-eligible nodes from the <>. 
[float] -=== Request +==== Request `POST _cluster/voting_config_exclusions/` + `DELETE _cluster/voting_config_exclusions` [float] -=== Path parameters +==== Path parameters `node_name`:: A <> that identifies {es} nodes. [float] -=== Description +==== Description By default, if there are more than three master-eligible nodes in the cluster and you remove fewer than half of the master-eligible nodes in the cluster at @@ -58,7 +58,7 @@ maintain the voting configuration. For more information, see <>. [float] -=== Examples +==== Examples Add `nodeId1` to the voting configuration exclusions list: [source,js] diff --git a/docs/reference/data-frames/apis/delete-transform.asciidoc b/docs/reference/data-frames/apis/delete-transform.asciidoc index d772bc3c15d89..7c3e4e53c2886 100644 --- a/docs/reference/data-frames/apis/delete-transform.asciidoc +++ b/docs/reference/data-frames/apis/delete-transform.asciidoc @@ -8,40 +8,40 @@ Delete {dataframe-transforms} ++++ -beta[] - Deletes an existing {dataframe-transform}. -[discrete] +beta[] + [[delete-data-frame-transform-request]] ==== {api-request-title} `DELETE _data_frame/transforms/` -[discrete] [[delete-data-frame-transform-prereqs]] ==== {api-prereq-title} +* Before you can delete the {dataframe-transform}, you must stop it. * If the {es} {security-features} are enabled, you must have `manage_data_frame_transforms` cluster privileges to use this API. The built-in `data_frame_transforms_admin` role has these privileges. For more information, see {stack-ov}/security-privileges.html[Security privileges] and {stack-ov}/built-in-roles.html[Built-in roles]. -[discrete] -[[delete-data-frame-transform-desc]] -==== {api-description-title} -NOTE: Before you can delete the {dataframe-transform}, you must stop it. - -[discrete] [[delete-data-frame-transform-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the {dataframe-transform}. +``:: + (Required, string) Identifier for the {dataframe-transform}. + +[[delete-data-frame-transform-query-parms]] +==== {api-query-parms-title} + +`force`:: +(Optional, boolean) When `true`, the {dataframe-transform} is deleted regardless of its +current state. The default value is `false`, meaning that the {dataframe-transform} must be +`stopped` before it can be deleted. -[discrete] [[delete-data-frame-transform-examples]] ==== {api-examples-title} diff --git a/docs/reference/data-frames/apis/get-transform-stats.asciidoc b/docs/reference/data-frames/apis/get-transform-stats.asciidoc index 889a109b8a376..2abb50407fdaf 100644 --- a/docs/reference/data-frames/apis/get-transform-stats.asciidoc +++ b/docs/reference/data-frames/apis/get-transform-stats.asciidoc @@ -8,15 +8,14 @@ Get {dataframe-transform} statistics ++++ +Retrieves usage information for {dataframe-transforms}. + beta[] -Retrieves usage information for {dataframe-transforms}. -[discrete] [[get-data-frame-transform-stats-request]] ==== {api-request-title} - `GET _data_frame/transforms//_stats` `GET _data_frame/transforms/,/_stats` + @@ -27,7 +26,7 @@ Retrieves usage information for {dataframe-transforms}. `GET _data_frame/transforms/*/_stats` + -[discrete] + [[get-data-frame-transform-stats-prereqs]] ==== {api-prereq-title} @@ -37,7 +36,7 @@ Retrieves usage information for {dataframe-transforms}. see {stack-ov}/security-privileges.html[Security privileges] and {stack-ov}/built-in-roles.html[Built-in roles]. 
-[discrete] + [[get-data-frame-transform-stats-desc]] ==== {api-description-title} @@ -47,22 +46,22 @@ You can get statistics for all {dataframe-transforms} by using `_all`, by specifying `*` as the ``, or by omitting the ``. -[discrete] + [[get-data-frame-transform-stats-path-parms]] ==== {api-path-parms-title} -`` (Optional):: - (string) Identifier for the {dataframe-transform}. It can be a +``:: + (Optional, string) Identifier for the {dataframe-transform}. It can be a {dataframe-transform} identifier or a wildcard expression. If you do not specify one of these options, the API returns information for all {dataframe-transforms}. -[discrete] + [[get-data-frame-transform-stats-query-parms]] ==== {api-query-parms-title} -`allow_no_match` (Optional):: - (boolean) Specifies what to do when the request: +`allow_no_match`:: + (Optional, boolean) Specifies what to do when the request: + -- * Contains wildcard expressions and there are no {dataframe-transforms} that match. @@ -75,15 +74,13 @@ If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches. -- -`from` (Optional):: - (integer) Skips the specified number of {dataframe-transforms}. The +`from`:: + (Optional, integer) Skips the specified number of {dataframe-transforms}. The default value is `0`. -`size` (Optional):: - (integer) Specifies the maximum number of {dataframe-transforms} to obtain. - The default value is `100`. +`size`:: + (Optional, integer) Specifies the maximum number of {dataframe-transforms} to obtain. The default value is `100`. -[discrete] [[get-data-frame-transform-stats-response]] ==== {api-response-body-title} @@ -98,7 +95,6 @@ are no matches or only partial matches. If `allow_no_match` is `false`, this code indicates that there are no resources that match the request or only partial matches for the request. -[discrete] [[get-data-frame-transform-stats-example]] ==== Examples diff --git a/docs/reference/data-frames/apis/get-transform.asciidoc b/docs/reference/data-frames/apis/get-transform.asciidoc index 6a7d4707e9f26..0acf991402bfe 100644 --- a/docs/reference/data-frames/apis/get-transform.asciidoc +++ b/docs/reference/data-frames/apis/get-transform.asciidoc @@ -8,11 +8,10 @@ Get {dataframe-transforms} ++++ -beta[] - Retrieves configuration information for {dataframe-transforms}. -[discrete] +beta[] + [[get-data-frame-transform-request]] ==== {api-request-title} @@ -26,7 +25,6 @@ Retrieves configuration information for {dataframe-transforms}. `GET _data_frame/transforms/*` -[discrete] [[get-data-frame-transform-prereqs]] ==== {api-prereq-title} @@ -36,7 +34,6 @@ Retrieves configuration information for {dataframe-transforms}. see {stack-ov}/security-privileges.html[Security privileges] and {stack-ov}/built-in-roles.html[Built-in roles]. -[discrete] [[get-data-frame-transform-desc]] ==== {api-description-title} @@ -46,22 +43,20 @@ You can get information for all {dataframe-transforms} by using `_all`, by specifying `*` as the ``, or by omitting the ``. -[discrete] [[get-data-frame-transform-path-parms]] ==== {api-path-parms-title} -`` (Optional):: - (string) Identifier for the {dataframe-transform}. It can be a +``:: + (Optional, string) Identifier for the {dataframe-transform}. It can be a {dataframe-transform} identifier or a wildcard expression. If you do not specify one of these options, the API returns information for all {dataframe-transforms}. 
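For instance, a single transform can be retrieved by its identifier. A sketch, assuming a hypothetical transform named `ecommerce_transform` exists:

[source,js]
--------------------------------------------------
GET _data_frame/transforms/ecommerce_transform
--------------------------------------------------
// CONSOLE
// TEST[skip:sketch using a hypothetical transform]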
-[discrete] [[get-data-frame-transform-query-parms]] ==== {api-query-parms-title} -`allow_no_match` (Optional):: -(boolean) Specifies what to do when the request: +`allow_no_match`:: +(Optional, boolean) Specifies what to do when the request: + -- * Contains wildcard expressions and there are no {dataframe-transforms} that match. @@ -74,15 +69,13 @@ If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches. -- -`from` (Optional):: - (integer) Skips the specified number of {dataframe-transforms}. The +`from`:: + (Optional, integer) Skips the specified number of {dataframe-transforms}. The default value is `0`. -`size` (Optional):: - (integer) Specifies the maximum number of {dataframe-transforms} to obtain. - The default value is `100`. +`size`:: + (Optional, integer) Specifies the maximum number of {dataframe-transforms} to obtain. The default value is `100`. -[discrete] [[get-data-frame-transform-response]] ==== {api-response-body-title} @@ -97,7 +90,6 @@ are no matches or only partial matches. If `allow_no_match` is `false`, this code indicates that there are no resources that match the request or only partial matches for the request. -[discrete] [[get-data-frame-transform-example]] ==== {api-examples-title} @@ -143,6 +135,7 @@ The API returns the following results: "dest" : { "index" : "kibana_sample_data_ecommerce_transform" }, + "frequency": "1m", "pivot" : { "group_by" : { "customer_id" : { diff --git a/docs/reference/data-frames/apis/index.asciidoc b/docs/reference/data-frames/apis/index.asciidoc index b3fa17e3c48be..3a40948bd667b 100644 --- a/docs/reference/data-frames/apis/index.asciidoc +++ b/docs/reference/data-frames/apis/index.asciidoc @@ -1,18 +1,17 @@ [role="xpack"] [testenv="basic"] [[data-frame-apis]] -== {dataframe-cap} APIs +== {dataframe-transform-cap} APIs -[float] -[[data-frame-transform-apis]] -=== {dataframe-transforms-cap} +See also {stack-ov}/ml-dataframes.html[{dataframe-transforms-cap}]. -* <> -* <> or -<> -* <> or -<> -* <> or <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> //CREATE include::put-transform.asciidoc[] diff --git a/docs/reference/data-frames/apis/preview-transform.asciidoc b/docs/reference/data-frames/apis/preview-transform.asciidoc index 42e01e557ddd3..c0b0f6ad88a4d 100644 --- a/docs/reference/data-frames/apis/preview-transform.asciidoc +++ b/docs/reference/data-frames/apis/preview-transform.asciidoc @@ -8,17 +8,15 @@ Preview {dataframe-transforms} ++++ -beta[] - Previews a {dataframe-transform}. -[discrete] +beta[] + [[preview-data-frame-transform-request]] ==== {api-request-title} `POST _data_frame/transforms/_preview` -[discrete] [[preview-data-frame-transform-prereq]] ==== {api-prereq-title} @@ -30,7 +28,6 @@ Previews a {dataframe-transform}. {stack-ov}/security-privileges.html[Security privileges] and {stack-ov}/built-in-roles.html[Built-in roles]. -[discrete] [[preview-data-frame-transform-desc]] ==== {api-description-title} @@ -39,18 +36,27 @@ This API generates a preview of the results that you will get when you run the configuration. It returns a maximum of 100 results. The calculations are based on all the current data in the source index. -[discrete] [[preview-data-frame-transform-request-body]] ==== {api-request-body-title} -`source` (Required):: - (object) The source index or index pattern. - -`pivot` (Required):: - (object) Defines the pivot function `group by` fields and the aggregation to - reduce the data. See <>. 
+`source`:: + (Required, object) The source configuration, which has the following + properties: + + `index`::: + (Required, string or array) The _source indices_ for the + {dataframe-transform}. It can be a single index, an index pattern (for + example, `"myindex*"`), or an array of indices (for example, + `["index1", "index2"]`). + + `query`::: + (Optional, object) A query clause that retrieves a subset of data from the + source index. See <>. + +`pivot`:: + (Required, object) Defines the pivot function `group by` fields and the + aggregation to reduce the data. See <>. -[discrete] [[preview-data-frame-transform-response]] ==== {api-response-body-title} @@ -59,7 +65,6 @@ on all the current data in the source index. representation of the documents that would be created in the destination index by the {dataframe-transform}. -[discrete] ==== {api-examples-title} [source,js] diff --git a/docs/reference/data-frames/apis/put-transform.asciidoc b/docs/reference/data-frames/apis/put-transform.asciidoc index abc5779e12a88..5d5fcb482818d 100644 --- a/docs/reference/data-frames/apis/put-transform.asciidoc +++ b/docs/reference/data-frames/apis/put-transform.asciidoc @@ -8,17 +8,15 @@ Create {dataframe-transforms} ++++ -beta[] - Instantiates a {dataframe-transform}. -[discrete] +beta[] + [[put-data-frame-transform-request]] ==== {api-request-title} `PUT _data_frame/transforms/` -[discrete] [[put-data-frame-transform-prereqs]] ==== {api-prereq-title} @@ -30,45 +28,102 @@ have `read` and `view_index_metadata` privileges on the source index and `read`, information, see {stack-ov}/security-privileges.html[Security privileges] and {stack-ov}/built-in-roles.html[Built-in roles]. -[discrete] [[put-data-frame-transform-desc]] ==== {api-description-title} +When the {dataframe-transform} is created, a series of validations occur to +ensure its success. For example, there is a check for the existence of the +source indices and a check that the destination index is not part of the source +index pattern. You can use the `defer_validation` parameter to skip these +checks. + IMPORTANT: You must use {kib} or this API to create a {dataframe-transform}. Do not put a {dataframe-transform} directly into any `.data-frame-internal*` indices using the Elasticsearch index API. If {es} {security-features} are enabled, do not give users any privileges on `.data-frame-internal*` indices. -[discrete] [[put-data-frame-transform-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the {dataframe-transform}. This identifier can contain - lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It - must start and end with alphanumeric characters. +``:: + (Required, string) Identifier for the {dataframe-transform}. This identifier + can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and + underscores. It must start and end with alphanumeric characters. + +[[put-data-frame-transform-query-parms]] +==== {api-query-parms-title} + +`defer_validation`:: + (Optional, boolean) When `true`, deferrable validations are not run. This + behavior may be desired if the source index does not exist until after the + {dataframe-transform} is created. Deferred validations are always run when the + {dataframe-transform} is started, with the exception of privilege checks. If + the user who created the transform does not have the required privileges on + the source and destination indices, the transform starts but then fails when + it attempts the unauthorized operation. 
The default value is `false`. -[discrete] [[put-data-frame-transform-request-body]] ==== {api-request-body-title} -`description` (Optional):: - (string) Free text description of the {dataframe-transform}. +`description`:: + (Optional, string) Free text description of the {dataframe-transform}. + +`dest`:: + (Required, object) The destination configuration, which has the + following properties: + + `index`::: + (Required, string) The _destination index_ for the {dataframe-transform}. + + `pipeline`::: + (Optional, string) The unique identifier for a <>. -`dest` (Required):: - (object) The destination configuration, which consists of `index` and - optionally a `pipeline` id. See <>. +`frequency`:: + (Optional, time units) The interval between checks for changes in the source + indices when the {dataframe-transform} is running continuously. Also determines + the retry interval in the event of transient failures while the {dataframe-transform} is + searching or indexing. The minimum value is `1s` and the maximum is `1h`. The + default value is `1m`. -`pivot` (Optional):: - (object) Defines the pivot function `group by` fields and the aggregation to +`pivot`:: + (Required, object) Defines the pivot function `group by` fields and the aggregation to reduce the data. See <>. -`source` (Required):: - (object) The source configuration, which consists of `index` and optionally - a `query`. See <>. +`source`:: + (Required, object) The source configuration, which has the following + properties: + + `index`::: + (Required, string or array) The _source indices_ for the + {dataframe-transform}. It can be a single index, an index pattern (for + example, `"myindex*"`), or an array of indices (for example, + `["index1", "index2"]`). + + `query`::: + (Optional, object) A query clause that retrieves a subset of data from the + source index. See <>. + +`sync`:: + (Optional, object) Defines the properties required to run continuously. + `time`::: + (Required, object) Specifies that the {dataframe-transform} uses a time + field to synchronize the source and destination indices. + `field`:::: + (Required, string) The date field that is used to identify new documents + in the source. ++ +-- +TIP: In general, it’s a good idea to use a field that contains the +<>. If you use a different field, +you might need to set the `delay` such that it accounts for data transmission +delays. + +-- + `delay`:::: + (Optional, time units) The time delay between the current time and the + latest input data time. The default value is `60s`.
-[discrete] [[put-data-frame-transform-example]] ==== {api-examples-title} @@ -86,10 +141,6 @@ PUT _data_frame/transforms/ecommerce_transform } } }, - "dest": { - "index": "kibana_sample_data_ecommerce_transform", - "pipeline": "add_timestamp_pipeline" - }, "pivot": { "group_by": { "customer_id": { @@ -106,11 +157,22 @@ PUT _data_frame/transforms/ecommerce_transform } } }, - "description": "Maximum priced ecommerce data by customer_id in Asia" + "description": "Maximum priced ecommerce data by customer_id in Asia", + "dest": { + "index": "kibana_sample_data_ecommerce_transform", + "pipeline": "add_timestamp_pipeline" + }, + "frequency": "5m", + "sync": { + "time": { + "field": "order_date", + "delay": "60s" + } + } } -------------------------------------------------- // CONSOLE -// TEST[skip: https://github.com/elastic/elasticsearch/issues/43271] +// TEST[setup:kibana_sample_data_ecommerce] When the transform is created, you receive the following results: [source,js] diff --git a/docs/reference/data-frames/apis/start-transform.asciidoc b/docs/reference/data-frames/apis/start-transform.asciidoc index b76bcb0dd4796..90f70efb4dbe2 100644 --- a/docs/reference/data-frames/apis/start-transform.asciidoc +++ b/docs/reference/data-frames/apis/start-transform.asciidoc @@ -8,17 +8,15 @@ Start {dataframe-transforms} ++++ -beta[] - Starts one or more {dataframe-transforms}. -[discrete] +beta[] + [[start-data-frame-transform-request]] ==== {api-request-title} `POST _data_frame/transforms//_start` -[discrete] [[start-data-frame-transform-prereqs]] ==== {api-prereq-title} @@ -29,16 +27,24 @@ have `view_index_metadata` privileges on the source index for the {stack-ov}/security-privileges.html[Security privileges] and {stack-ov}/built-in-roles.html[Built-in roles]. -[discrete] +[[start-data-frame-transform-desc]] +==== {api-description-title} + +When a {dataframe-transform} starts, a series of validations occur to ensure its +success. If you deferred validation when you created the {dataframe-transform}, +they occur when you start the transform--with the exception of privilege checks. +If the user who created the transform does not have the required privileges on +the source and destination indices, the transform starts but then fails when +it attempts the unauthorized operation. + [[start-data-frame-transform-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the {dataframe-transform}. This identifier can contain - lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It - must start and end with alphanumeric characters. +``:: + (Required, string) Identifier for the {dataframe-transform}. This identifier + can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and + underscores. It must start and end with alphanumeric characters. -[discrete] [[start-data-frame-transform-example]] ==== {api-examples-title} diff --git a/docs/reference/data-frames/apis/stop-transform.asciidoc b/docs/reference/data-frames/apis/stop-transform.asciidoc index 80c2654babe0d..f6f7784ebb91f 100644 --- a/docs/reference/data-frames/apis/stop-transform.asciidoc +++ b/docs/reference/data-frames/apis/stop-transform.asciidoc @@ -8,11 +8,10 @@ Stop {dataframe-transforms} ++++ -beta[] - Stops one or more {dataframe-transforms}. -[discrete] +beta[] + [[stop-data-frame-transform-request]] ==== {api-request-title} @@ -22,7 +21,6 @@ Stops one or more {dataframe-transforms}. 
`POST _data_frame/transforms/_all/_stop` -[discrete] [[stop-data-frame-transform-prereq]] ==== {api-prereq-title} @@ -32,7 +30,6 @@ Stops one or more {dataframe-transforms}. see {stack-ov}/security-privileges.html[Security privileges] and {stack-ov}/built-in-roles.html[Built-in roles]. -[discrete] [[stop-data-frame-transform-desc]] ==== {api-description-title} @@ -41,21 +38,19 @@ comma-separated list of {dataframe-transforms} or a wildcard expression. All {dataframe-transforms} can be stopped by using `_all` or `*` as the ``. -[discrete] [[stop-data-frame-transform-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the {dataframe-transform}. This identifier can contain - lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It - must start and end with alphanumeric characters. +``:: + (Required, string) Identifier for the {dataframe-transform}. This identifier + can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and + underscores. It must start and end with alphanumeric characters. -[discrete] [[stop-data-frame-transform-query-parms]] ==== {api-query-parms-title} -`allow_no_match` (Optional):: -(boolean) Specifies what to do when the request: +`allow_no_match`:: +(Optional, boolean) Specifies what to do when the request: + -- * Contains wildcard expressions and there are no {dataframe-transforms} that match. @@ -73,20 +68,19 @@ If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches. -- -`timeout` (Optional):: - (time value) If `wait_for_completion=true`, the API blocks for (at maximum) - the specified duration while waiting for the transform to stop. If more than - `timeout` time has passed, the API throws a timeout exception. Even if a - timeout exception is thrown, the stop request is still processing and +`timeout`:: + (Optional, time value) If `wait_for_completion=true`, the API blocks for (at + maximum) the specified duration while waiting for the transform to stop. If + more than `timeout` time has passed, the API throws a timeout exception. Even + if a timeout exception is thrown, the stop request is still processing and eventually moves the transform to `STOPPED`. The timeout simply means the API call itself timed out while waiting for the status change. Defaults to `30s` -`wait_for_completion` (Optional):: - (boolean) If set to `true`, causes the API to block until the indexer state - completely stops. If set to `false`, the API returns immediately and the +`wait_for_completion`:: + (Optional, boolean) If set to `true`, causes the API to block until the indexer + state completely stops. If set to `false`, the API returns immediately and the indexer will be stopped asynchronously in the background. Defaults to `false`. -[discrete] [[stop-data-frame-transform-response-codes]] ==== {api-response-codes-title} @@ -94,7 +88,6 @@ are no matches or only partial matches. If `allow_no_match` is `false`, this code indicates that there are no resources that match the request or only partial matches for the request. 
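Combining these options, a stop request that blocks for up to ten seconds while the indexer winds down might be sketched as follows, assuming a hypothetical transform named `ecommerce_transform`:

[source,js]
--------------------------------------------------
POST _data_frame/transforms/ecommerce_transform/_stop?wait_for_completion=true&timeout=10s
--------------------------------------------------
// CONSOLE
// TEST[skip:sketch using a hypothetical transform]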
-[discrete] [[stop-data-frame-transform-example]] ==== {api-examples-title} diff --git a/docs/reference/data-frames/apis/transformresource.asciidoc b/docs/reference/data-frames/apis/transformresource.asciidoc index 8bb032f704c9b..4f6b43c43bc19 100644 --- a/docs/reference/data-frames/apis/transformresource.asciidoc +++ b/docs/reference/data-frames/apis/transformresource.asciidoc @@ -18,6 +18,13 @@ For more information, see `dest`:: (object) The destination for the {dataframe-transform}. See <>. + +`frequency`:: + (time units) The interval between checks for changes in the source indices + when the {dataframe-transform} is running continuously. Also determines the + retry interval in the event of transient failures while the {dataframe-transform} is + searching or indexing. The minimum value is `1s` and the maximum is `1h`. The + default value is `1m`. `id`:: (string) A unique identifier for the {dataframe-transform}. @@ -56,8 +63,10 @@ you create a {dataframe-transform}, you must define its source. [[data-frame-transform-source-properties]] ===== {api-definitions-title} -`index`:: - (array) The _source index_ for the {dataframe-transform}. +`index`:: + (string or array) The _source indices_ for the {dataframe-transform}. It can + be a single index, an index pattern (for example, `"myindex*"`), or an array + of indices (for example, `["index1", "index2"]`). `query`:: (object) A query clause that retrieves a subset of data from the source index. @@ -74,8 +83,8 @@ pivot function `group by` fields and the aggregation to reduce the data. ===== {api-definitions-title} `aggregations` or `aggs`:: -(object) Defines how to aggregate the grouped data. The following composite -aggregations are supported: + (object) Defines how to aggregate the grouped data. The following composite + aggregations are supported: + -- * {ref}/search-aggregations-metrics-avg-aggregation.html[Average] @@ -96,8 +105,8 @@ composite aggregations. See -- `group_by`:: -(object) Defines how to group the data. More than one grouping can be defined -per pivot. The following groupings are supported: + (object) Defines how to group the data. More than one grouping can be defined + per pivot. The following groupings are supported: + -- * {ref}/search-aggregations-bucket-composite-aggregation.html#_terms[Terms] @@ -105,8 +114,14 @@ per pivot. The following groupings are supported: * {ref}/search-aggregations-bucket-composite-aggregation.html#_date_histogram[Date Histogram] -- +`max_page_search_size`:: + (integer) Defines the initial page size to use for the composite aggregation + for each checkpoint. If circuit breaker exceptions occur, the page size is + dynamically adjusted to a lower value. The minimum value is `10` and the + maximum is `10,000`. The default value is `500`. + [[data-frame-transform-example]] ==== {api-examples-title} See the -<>. \ No newline at end of file +<>. diff --git a/docs/reference/docs.asciidoc b/docs/reference/docs.asciidoc index 5c4c471b0a131..a860bfc42a0da 100644 --- a/docs/reference/docs.asciidoc +++ b/docs/reference/docs.asciidoc @@ -1,8 +1,5 @@ [[docs]] -= Document APIs - -[partintro] --- +== Document APIs This section starts with a short introduction to Elasticsearch's <>, followed by a detailed description of the following CRUD APIs: @@ -23,8 +20,6 @@ detailed description of the following CRUD APIs: NOTE: All CRUD APIs are single-index APIs. The `index` parameter accepts a single index name, or an `alias` which points to a single index. 
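For example, a minimal sketch of working through an alias that points to a single index (the index and alias names here are illustrative):

[source,js]
--------------------------------------------------
PUT /my-index

PUT /my-index/_alias/my-alias

PUT /my-alias/_doc/1
{
  "message": "indexed through a single-index alias"
}
--------------------------------------------------
// NOTCONSOLE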
--- - include::docs/data-replication.asciidoc[] include::docs/index_.asciidoc[] diff --git a/docs/reference/docs/bulk.asciidoc b/docs/reference/docs/bulk.asciidoc index e106c2b16eea7..440d2dd709318 100644 --- a/docs/reference/docs/bulk.asciidoc +++ b/docs/reference/docs/bulk.asciidoc @@ -1,5 +1,5 @@ [[docs-bulk]] -== Bulk API +=== Bulk API The bulk API makes it possible to perform many index/delete operations in a single API call. This can greatly increase the indexing speed. @@ -198,7 +198,7 @@ chunks, as this will slow things down. [float] [[bulk-optimistic-concurrency-control]] -=== Optimistic Concurrency Control +==== Optimistic Concurrency Control Each `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action @@ -209,7 +209,7 @@ documents. See <> for more details. [float] [[bulk-versioning]] -=== Versioning +==== Versioning Each bulk item can include the version value using the `version` field. It automatically follows the behavior of the @@ -218,7 +218,7 @@ support the `version_type` (see <>). [float] [[bulk-routing]] -=== Routing +==== Routing Each bulk item can include the routing value using the `routing` field. It automatically follows the behavior of the @@ -226,7 +226,7 @@ index / delete operation based on the `_routing` mapping. [float] [[bulk-wait-for-active-shards]] -=== Wait For Active Shards +==== Wait For Active Shards When making bulk calls, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active @@ -236,7 +236,7 @@ example. [float] [[bulk-refresh]] -=== Refresh +==== Refresh Control when the changes made by this request are visible to search. See <>. @@ -250,7 +250,7 @@ participate in the `_bulk` request at all. [float] [[bulk-update]] -=== Update +==== Update When using the `update` action, `retry_on_conflict` can be used as a field in the action itself (not in the extra payload line), to specify how many @@ -280,11 +280,11 @@ POST _bulk [float] [[bulk-security]] -=== Security +==== Security See <>. [float] [[bulk-partial-responses]] -=== Partial responses +==== Partial responses To ensure fast responses, the bulk API will respond with partial results if one or more shards fail. See <> for more information. \ No newline at end of file diff --git a/docs/reference/docs/concurrency-control.asciidoc b/docs/reference/docs/concurrency-control.asciidoc index eeb5aca4ed032..4b02239ee6ced 100644 --- a/docs/reference/docs/concurrency-control.asciidoc +++ b/docs/reference/docs/concurrency-control.asciidoc @@ -1,5 +1,5 @@ [[optimistic-concurrency-control]] -== Optimistic concurrency control +=== Optimistic concurrency control Elasticsearch is distributed. When documents are created, updated, or deleted, the new version of the document has to be replicated to other nodes in the cluster. @@ -87,7 +87,7 @@ returns: Note: The <> can return the `_seq_no` and `_primary_term` -for each search hit by setting <>. +for each search hit by setting <>. The sequence number and the primary term uniquely identify a change. 
By noting down the sequence number and primary term returned, you can make sure to only change the diff --git a/docs/reference/docs/data-replication.asciidoc b/docs/reference/docs/data-replication.asciidoc index 28389fb05ba94..969e3dfd54ce2 100644 --- a/docs/reference/docs/data-replication.asciidoc +++ b/docs/reference/docs/data-replication.asciidoc @@ -1,9 +1,9 @@ [[docs-replication]] -== Reading and Writing documents +=== Reading and Writing documents [float] -=== Introduction +==== Introduction Each index in Elasticsearch is <> and each shard can have multiple copies. These copies are known as a _replication group_ and must be kept in sync when documents @@ -21,7 +21,7 @@ This purpose of this section is to give a high level overview of the Elasticsear it has for various interactions between write and read operations. [float] -=== Basic write model +==== Basic write model Every indexing operation in Elasticsearch is first resolved to a replication group using <>, typically based on the document ID. Once the replication group has been determined, @@ -43,7 +43,7 @@ The primary shard follows this basic flow: completion of the request to the client. [float] -==== Failure handling +===== Failure handling Many things can go wrong during indexing -- disks can get corrupted, nodes can be disconnected from each other, or some configuration mistake could cause an operation to fail on a replica despite it being successful on the primary. These @@ -84,7 +84,7 @@ issues can cause data loss. See <> for some mitiga ************ [float] -=== Basic read model +==== Basic read model Reads in Elasticsearch can be very lightweight lookups by ID or a heavy search request with complex aggregations that take non-trivial CPU power. One of the beauties of the primary-backup model is that it keeps all shard copies identical @@ -103,7 +103,7 @@ is as follows: [float] [[shard-failures]] -==== Shard failures +===== Shard failures When a shard fails to respond to a read request, the coordinating node sends the request to another shard copy in the same replication group. Repeated failures @@ -122,7 +122,7 @@ Shard failures are indicated by the `timed_out` and `_shards` fields of the response header. [float] -=== A few simple implications +==== A few simple implications Each of these basic flows determines how Elasticsearch behaves as a system for both reads and writes. Furthermore, since read and write requests can be executed concurrently, these two basic flows interact with each other. This has a few inherent implications: @@ -137,7 +137,7 @@ Two copies by default:: This model can be fault tolerant while maintaining only quorum-based system where the minimum number of copies for fault tolerance is 3. [float] -=== Failures +==== Failures Under failures, the following is possible: @@ -151,7 +151,7 @@ Dirty reads:: An isolated primary can expose writes that will not be acknowledge this risk by pinging the master every second (by default) and rejecting indexing operations if no master is known. [float] -=== The Tip of the Iceberg +==== The Tip of the Iceberg This document provides a high level overview of how Elasticsearch deals with data. Of course, there is much much more going on under the hood. 
Things like primary terms, cluster state publishing, and master election all play a role in diff --git a/docs/reference/docs/delete-by-query.asciidoc b/docs/reference/docs/delete-by-query.asciidoc index 4dbdd18ca02d3..518b8112d730a 100644 --- a/docs/reference/docs/delete-by-query.asciidoc +++ b/docs/reference/docs/delete-by-query.asciidoc @@ -1,5 +1,5 @@ [[docs-delete-by-query]] -== Delete By Query API +=== Delete By Query API The simplest usage of `_delete_by_query` just performs a deletion on every document that matches a query. Here is the API: @@ -138,7 +138,7 @@ POST twitter/_delete_by_query?scroll_size=5000 [float] -=== URL Parameters +==== URL Parameters In addition to the standard parameters like `pretty`, the delete by query API also supports `refresh`, `wait_for_completion`, `wait_for_active_shards`, `timeout`, @@ -187,7 +187,7 @@ cause Elasticsearch to create many requests and then wait for a while before starting the next set. This is "bursty" instead of "smooth". The default is `-1`. [float] -=== Response body +==== Response body ////////////////////////// @@ -294,7 +294,7 @@ version conflicts. [float] [[docs-delete-by-query-task-api]] -=== Works with the Task API +==== Works with the Task API You can fetch the status of any running delete by query requests with the <>: @@ -371,7 +371,7 @@ you to delete that document. [float] [[docs-delete-by-query-cancel-task-api]] -=== Works with the Cancel Task API +==== Works with the Cancel Task API Any delete by query can be canceled using the <>: @@ -390,7 +390,7 @@ has been cancelled and terminates itself. [float] [[docs-delete-by-query-rethrottle]] -=== Rethrottling +==== Rethrottling The value of `requests_per_second` can be changed on a running delete by query using the `_rethrottle` API: @@ -412,7 +412,7 @@ timeouts. [float] [[docs-delete-by-query-slice]] -=== Slicing +==== Slicing Delete by query supports <> to parallelize the deleting process. This parallelization can improve efficiency and provide a convenient way to @@ -420,7 +420,7 @@ break the request down into smaller parts. [float] [[docs-delete-by-query-manual-slice]] -==== Manual slicing +===== Manual slicing Slice a delete by query manually by providing a slice id and total number of slices to each request: @@ -495,7 +495,7 @@ Which results in a sensible `total` like this one: [float] [[docs-delete-by-query-automatic-slice]] -==== Automatic slicing +===== Automatic slicing You can also let delete-by-query automatically parallelize using <> to slice on `_id`. Use `slices` to specify the number of @@ -581,7 +581,7 @@ though these are all taken at approximately the same time. [float] [[docs-delete-by-query-picking-slices]] -===== Picking the number of slices +====== Picking the number of slices If slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices. If you're slicing manually or otherwise tuning diff --git a/docs/reference/docs/delete.asciidoc b/docs/reference/docs/delete.asciidoc index b242741abd522..3c30f8a51987e 100644 --- a/docs/reference/docs/delete.asciidoc +++ b/docs/reference/docs/delete.asciidoc @@ -1,5 +1,5 @@ [[docs-delete]] -== Delete API +=== Delete API The delete API allows to delete a JSON document from a specific index based on its id. 
The following example deletes the JSON document @@ -37,7 +37,7 @@ The result of the above delete operation is: [float] [[optimistic-concurrency-control-delete]] -=== Optimistic concurrency control +==== Optimistic concurrency control Delete operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary @@ -47,7 +47,7 @@ and a status code of 409. See <> for more detail [float] [[delete-versioning]] -=== Versioning +==== Versioning Each document indexed is versioned. When deleting a document, the `version` can be specified to make sure the relevant document we are trying to delete is @@ -60,7 +60,7 @@ determined by the `index.gc_deletes` index setting and defaults to 60 seconds. [float] [[delete-routing]] -=== Routing +==== Routing When indexing using the ability to control the routing, in order to delete a document, the routing value should also be provided. For @@ -97,7 +97,7 @@ the request. [float] [[delete-index-creation]] -=== Automatic index creation +==== Automatic index creation If an <> is used, the delete operation automatically creates an index if it has not been @@ -106,7 +106,7 @@ for manually creating an index). [float] [[delete-distributed]] -=== Distributed +==== Distributed The delete operation gets hashed into a specific shard id. It then gets redirected into the primary shard within that id group, and replicated @@ -114,7 +114,7 @@ redirected into the primary shard within that id group, and replicated [float] [[delete-wait-for-active-shards]] -=== Wait For Active Shards +==== Wait For Active Shards When making delete requests, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active @@ -124,7 +124,7 @@ example. [float] [[delete-refresh]] -=== Refresh +==== Refresh Control when the changes made by this request are visible to search. See <>. @@ -132,7 +132,7 @@ Control when the changes made by this request are visible to search. See [float] [[delete-timeout]] -=== Timeout +==== Timeout The primary shard assigned to perform the delete operation might not be available when the delete operation is executed. Some reasons for this diff --git a/docs/reference/docs/get.asciidoc b/docs/reference/docs/get.asciidoc index e84df1d5a9689..3e9f1dc053efa 100644 --- a/docs/reference/docs/get.asciidoc +++ b/docs/reference/docs/get.asciidoc @@ -1,5 +1,5 @@ [[docs-get]] -== Get API +=== Get API The get API allows to get a JSON document from the index based on its id. The following example gets a JSON document from an index called @@ -51,7 +51,7 @@ HEAD twitter/_doc/0 [float] [[realtime]] -=== Realtime +==== Realtime By default, the get API is realtime, and is not affected by the refresh rate of the index (when data will become visible for search). If a document @@ -62,7 +62,7 @@ one can set the `realtime` parameter to `false`. [float] [[get-source-filtering]] -=== Source filtering +==== Source filtering By default, the get operation returns the contents of the `_source` field unless you have used the `stored_fields` parameter or if the `_source` field is disabled. @@ -98,7 +98,7 @@ GET twitter/_doc/0?_source=*.id,retweeted [float] [[get-stored-fields]] -=== Stored Fields +==== Stored Fields The get operation allows specifying a set of stored fields that will be returned by passing the `stored_fields` parameter. @@ -219,7 +219,7 @@ will fail. 
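To recap the pattern described above, a minimal sketch of a `stored_fields` request (the index, document id, and field names are illustrative, and the fields must be mapped as stored for values to be returned):

[source,js]
--------------------------------------------------
GET twitter/_doc/1?stored_fields=tags,counter
--------------------------------------------------
// NOTCONSOLE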
[float] [[_source]] -=== Getting the +_source+ directly +==== Getting the +_source+ directly Use the `/{index}/_source/{id}` endpoint to get just the `_source` field of the document, @@ -253,7 +253,7 @@ HEAD twitter/_source/1 [float] [[get-routing]] -=== Routing +==== Routing When indexing using the ability to control the routing, in order to get a document, the routing value should also be provided. For example: @@ -271,7 +271,7 @@ document not to be fetched. [float] [[preference]] -=== Preference +==== Preference Controls a `preference` of which shard replicas to execute the get request on. By default, the operation is randomized between the shard @@ -292,7 +292,7 @@ Custom (string) value:: [float] [[get-refresh]] -=== Refresh +==== Refresh The `refresh` parameter can be set to `true` in order to refresh the relevant shard before the get operation and make it searchable. Setting @@ -302,7 +302,7 @@ indexing). [float] [[get-distributed]] -=== Distributed +==== Distributed The get operation gets hashed into a specific shard id. It then gets redirected to one of the replicas within that shard id and returns the @@ -313,7 +313,7 @@ better GET scaling we will have. [float] [[get-versioning]] -=== Versioning support +==== Versioning support You can use the `version` parameter to retrieve the document only if its current version is equal to the specified one. This behavior is the same diff --git a/docs/reference/docs/index_.asciidoc b/docs/reference/docs/index_.asciidoc index 2c617832488b5..2ecd929ef55b4 100644 --- a/docs/reference/docs/index_.asciidoc +++ b/docs/reference/docs/index_.asciidoc @@ -1,5 +1,5 @@ [[docs-index_]] -== Index API +=== Index API IMPORTANT: See <>. @@ -54,7 +54,7 @@ NOTE: Replica shards may not all be started when an indexing operation success [float] [[index-creation]] -=== Automatic Index Creation +==== Automatic Index Creation The index operation automatically creates an index if it does not already exist, and applies any <> that are @@ -108,7 +108,7 @@ patterns are matched in the order in which they are given. [float] [[operation-type]] -=== Operation Type +==== Operation Type The index operation also accepts an `op_type` that can be used to force a `create` operation, allowing for "put-if-absent" behavior. When @@ -142,7 +142,7 @@ PUT twitter/_create/1 // CONSOLE [float] -=== Automatic ID Generation +==== Automatic ID Generation The index operation can be executed without specifying the id. In such a case, an id will be generated automatically. In addition, the `op_type` @@ -183,7 +183,7 @@ The result of the above index operation is: [float] [[optimistic-concurrency-control-index]] -=== Optimistic concurrency control +==== Optimistic concurrency control Index operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary @@ -193,7 +193,7 @@ and a status code of 409. See <> for more detail [float] [[index-routing]] -=== Routing +==== Routing By default, shard placement ? or `routing` ? is controlled by using a hash of the document's id value. For more explicit control, the value @@ -223,7 +223,7 @@ value is provided or extracted. [float] [[index-distributed]] -=== Distributed +==== Distributed The index operation is directed to the primary shard based on its route (see the Routing section above) and performed on the actual node @@ -232,7 +232,7 @@ if needed, the update is distributed to applicable replicas. 
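To make the route resolution described above concrete, here is a sketch of an index operation with an explicit routing value (the value `kimchy` is illustrative):

[source,js]
--------------------------------------------------
POST twitter/_doc?routing=kimchy
{
  "user": "kimchy",
  "message": "routed using an explicit routing value"
}
--------------------------------------------------
// NOTCONSOLE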
[float] [[index-wait-for-active-shards]] -=== Wait For Active Shards +==== Wait For Active Shards To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies @@ -290,14 +290,14 @@ replication succeeded/failed. [float] [[index-refresh]] -=== Refresh +==== Refresh Control when the changes made by this request are visible to search. See <>. [float] [[index-noop]] -=== Noop Updates +==== Noop Updates When updating a document using the index API a new version of the document is always created even if the document hasn't changed. If this isn't acceptable @@ -312,7 +312,7 @@ Elasticsearch runs on the shard receiving the updates. [float] [[timeout]] -=== Timeout +==== Timeout The primary shard assigned to perform the index operation might not be available when the index operation is executed. Some reasons for this @@ -336,7 +336,7 @@ PUT twitter/_doc/1?timeout=5m [float] [[index-versioning]] -=== Versioning +==== Versioning Each indexed document is given a version number. By default, internal versioning is used that starts at 1 and increments @@ -381,7 +381,7 @@ latest version will be used if the index operations arrive out of order for whatever reason. [float] -==== Version types +===== Version types Next to the `external` version type explained above, Elasticsearch also supports other types for specific use cases. Here is an overview of diff --git a/docs/reference/docs/multi-get.asciidoc b/docs/reference/docs/multi-get.asciidoc index 8d5dd2ad74a3d..3360f2c06ffa4 100644 --- a/docs/reference/docs/multi-get.asciidoc +++ b/docs/reference/docs/multi-get.asciidoc @@ -1,5 +1,5 @@ [[docs-multi-get]] -== Multi Get API +=== Multi Get API The Multi get API returns multiple documents based on an index, type, (optional) and id (and possibly routing). The response includes a `docs` array @@ -84,7 +84,7 @@ GET /test/_doc/_mget [float] [[mget-source-filtering]] -=== Source filtering +==== Source filtering By default, the `_source` field will be returned for every document (if stored). Similar to the <> API, you can retrieve only parts of @@ -128,7 +128,7 @@ GET /_mget [float] [[mget-fields]] -=== Fields +==== Fields Specific stored fields can be specified to be retrieved per document to get, similar to the <> parameter of the Get API. For example: @@ -179,7 +179,7 @@ GET /test/_doc/_mget?stored_fields=field1,field2 [float] [[mget-routing]] -=== Routing +==== Routing You can also specify a routing value as a parameter: @@ -209,11 +209,11 @@ document `test/_doc/1` will be fetched from the shard corresponding to routing k [float] [[mget-security]] -=== Security +==== Security See <>. [float] [[multi-get-partial-responses]] -=== Partial responses +==== Partial responses To ensure fast responses, the multi get API will respond with partial results if one or more shards fail. See <> for more information. \ No newline at end of file diff --git a/docs/reference/docs/multi-termvectors.asciidoc b/docs/reference/docs/multi-termvectors.asciidoc index 6d947d2d4036a..df00b39ef425b 100644 --- a/docs/reference/docs/multi-termvectors.asciidoc +++ b/docs/reference/docs/multi-termvectors.asciidoc @@ -1,5 +1,5 @@ [[docs-multi-termvectors]] -== Multi termvectors API +=== Multi termvectors API Multi termvectors API allows to get multiple termvectors at once. The documents from which to retrieve the term vectors are specified by an index and id. 
diff --git a/docs/reference/docs/refresh.asciidoc b/docs/reference/docs/refresh.asciidoc index 121e0c494d828..127ec9fc6fd9d 100644 --- a/docs/reference/docs/refresh.asciidoc +++ b/docs/reference/docs/refresh.asciidoc @@ -1,5 +1,5 @@ [[docs-refresh]] -== `?refresh` +=== `?refresh` The <>, <>, <>, and <> APIs support setting `refresh` to control when changes made @@ -30,7 +30,7 @@ Take no refresh related actions. The changes made by this request will be made visible at some point after the request returns. [float] -=== Choosing which setting to use +==== Choosing which setting to use Unless you have a good reason to wait for the change to become visible always use `refresh=false`, or, because that is the default, just leave the `refresh` @@ -64,7 +64,7 @@ general, if you have a running system you don't wish to disturb then [float] [[refresh_wait_for-force-refresh]] -=== `refresh=wait_for` Can Force a Refresh +==== `refresh=wait_for` Can Force a Refresh If a `refresh=wait_for` request comes in when there are already `index.max_refresh_listeners` (defaults to 1000) requests waiting for a refresh @@ -79,7 +79,7 @@ Bulk requests only take up one slot on each shard that they touch no matter how many times they modify the shard. [float] -=== Examples +==== Examples These will create a document and immediately refresh the index so it is visible: diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index 7717506a2d23e..6d973dff0c483 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -1,5 +1,5 @@ [[docs-reindex]] -== Reindex API +=== Reindex API IMPORTANT: Reindex requires <> to be enabled for all documents in the source index. @@ -392,7 +392,7 @@ POST _reindex [float] [[reindex-from-remote]] -=== Reindex from Remote +==== Reindex from Remote Reindex supports reindexing from a remote Elasticsearch cluster: @@ -525,7 +525,7 @@ POST _reindex [float] [[reindex-ssl]] -==== Configuring SSL parameters +===== Configuring SSL parameters Reindex from remote supports configurable SSL settings. These must be specified in the `elasticsearch.yml` file, with the exception of the @@ -617,7 +617,7 @@ Defaults to the keystore password. This setting cannot be used with `reindex.ssl.keystore.key_password`. [float] -=== URL Parameters +==== URL Parameters In addition to the standard parameters like `pretty`, the Reindex API also supports `refresh`, `wait_for_completion`, `wait_for_active_shards`, `timeout`, @@ -667,7 +667,7 @@ starting the next set. This is "bursty" instead of "smooth". The default value i [float] [[docs-reindex-response-body]] -=== Response body +==== Response body ////////////////////////// [source,js] @@ -781,7 +781,7 @@ the `conflicts` option to prevent reindex from aborting on version conflicts. [float] [[docs-reindex-task-api]] -=== Works with the Task API +==== Works with the Task API You can fetch the status of all running reindex requests with the <>: @@ -868,7 +868,7 @@ you to delete that document. [float] [[docs-reindex-cancel-task-api]] -=== Works with the Cancel Task API +==== Works with the Cancel Task API Any reindex can be canceled using the <>. For example: @@ -887,7 +887,7 @@ API will continue to list the task until it wakes to cancel itself. [float] [[docs-reindex-rethrottle]] -=== Rethrottling +==== Rethrottling The value of `requests_per_second` can be changed on a running reindex using the `_rethrottle` API: @@ -909,7 +909,7 @@ timeouts. 
[float] [[docs-reindex-change-name]] -=== Reindex to change the name of a field +==== Reindex to change the name of a field `_reindex` can be used to build a copy of an index with renamed fields. Say you create an index containing documents that look like this: @@ -976,7 +976,7 @@ which will return: [float] [[docs-reindex-slice]] -=== Slicing +==== Slicing Reindex supports <> to parallelize the reindexing process. This parallelization can improve efficiency and provide a convenient way to @@ -988,7 +988,7 @@ NOTE: Reindexing from remote clusters does not support [float] [[docs-reindex-manual-slice]] -==== Manual slicing +===== Manual slicing Slice a reindex request manually by providing a slice id and total number of slices to each request: @@ -1051,7 +1051,7 @@ which results in a sensible `total` like this one: [float] [[docs-reindex-automatic-slice]] -==== Automatic slicing +===== Automatic slicing You can also let `_reindex` automatically parallelize using <> to slice on `_uid`. Use `slices` to specify the number of slices to use: @@ -1125,7 +1125,7 @@ though these are all taken at approximately the same time. [float] [[docs-reindex-picking-slices]] -===== Picking the number of slices +====== Picking the number of slices If slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices. If slicing manually or otherwise tuning @@ -1144,7 +1144,7 @@ Whether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources. [float] -=== Reindexing many indices +==== Reindexing many indices If you have many indices to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up many indices. That way you can resume the process if there are any errors by removing the @@ -1170,7 +1170,7 @@ done // NOTCONSOLE [float] -=== Reindex daily indices +==== Reindex daily indices Notwithstanding the above advice, you can use `_reindex` in combination with <> to reindex daily indices to apply @@ -1228,7 +1228,7 @@ The previous method can also be used in conjunction with <> [float] -=== Return values +==== Return values Three types of values can be requested: _term information_, _term statistics_ and _field statistics_. By default, all term information and field statistics are returned for all fields but no term statistics. [float] -==== Term information +===== Term information * term frequency in the field (always returned) * term positions (`positions` : true) @@ -55,7 +55,7 @@ using UTF-16. ====== [float] -==== Term statistics +===== Term statistics Setting `term_statistics` to `true` (default is `false`) will return @@ -68,7 +68,7 @@ By default these values are not returned since term statistics can have a serious performance impact. [float] -==== Field statistics +===== Field statistics Setting `field_statistics` to `false` (default is `true`) will omit : @@ -80,7 +80,7 @@ omit : each term in this field) [float] -==== Terms Filtering +===== Terms Filtering With the parameter `filter`, the terms returned could also be filtered based on their tf-idf scores. This could be useful in order find out a good @@ -108,7 +108,7 @@ The following sub-parameters are supported: The maximum word length above which words will be ignored. Defaults to unbounded (`0`). [float] -=== Behaviour +==== Behaviour The term and field statistics are not accurate. Deleted documents are not taken into account. 
The information is only retrieved for the @@ -119,7 +119,7 @@ when requesting term vectors of artificial documents, a shard to get the statist from is randomly selected. Use `routing` only to hit a particular shard. [float] -==== Example: Returning stored term vectors +===== Example: Returning stored term vectors First, we create an index that stores term vectors, payloads etc. : @@ -265,7 +265,7 @@ Response: // TESTRESPONSE[s/"took": 6/"took": "$body.took"/] [float] -==== Example: Generating term vectors on the fly +===== Example: Generating term vectors on the fly Term vectors which are not explicitly stored in the index are automatically computed on the fly. The following request returns all information and statistics for the @@ -288,7 +288,7 @@ GET /twitter/_termvectors/1 [[docs-termvectors-artificial-doc]] [float] -==== Example: Artificial documents +===== Example: Artificial documents Term vectors can also be generated for artificial documents, that is for documents not present in the index. For example, the following request would @@ -312,7 +312,7 @@ GET /twitter/_termvectors [[docs-termvectors-per-field-analyzer]] [float] -===== Per-field analyzer +====== Per-field analyzer Additionally, a different analyzer than the one at the field may be provided by using the `per_field_analyzer` parameter. This is useful in order to @@ -378,7 +378,7 @@ Response: [[docs-termvectors-terms-filtering]] [float] -==== Example: Terms filtering +===== Example: Terms filtering Finally, the terms returned could be filtered based on their tf-idf scores. In the example below we obtain the three most "interesting" keywords from the diff --git a/docs/reference/docs/update-by-query.asciidoc b/docs/reference/docs/update-by-query.asciidoc index 163ce17194268..fded2c95e5e61 100644 --- a/docs/reference/docs/update-by-query.asciidoc +++ b/docs/reference/docs/update-by-query.asciidoc @@ -1,5 +1,5 @@ [[docs-update-by-query]] -== Update By Query API +=== Update By Query API The simplest usage of `_update_by_query` just performs an update on every document in the index without changing the source. This is useful to @@ -196,7 +196,7 @@ POST twitter/_update_by_query?pipeline=set-foo // TEST[setup:twitter] [float] -=== URL Parameters +==== URL Parameters In addition to the standard parameters like `pretty`, the Update By Query API also supports `refresh`, `wait_for_completion`, `wait_for_active_shards`, `timeout`, @@ -246,7 +246,7 @@ starting the next set. This is "bursty" instead of "smooth". The default is `-1` [float] [[docs-update-by-query-response-body]] -=== Response body +==== Response body ////////////////////////// [source,js] @@ -351,7 +351,7 @@ version conflicts. [float] [[docs-update-by-query-task-api]] -=== Works with the Task API +==== Works with the Task API You can fetch the status of all running update by query requests with the <>: @@ -433,7 +433,7 @@ you to delete that document. [float] [[docs-update-by-query-cancel-task-api]] -=== Works with the Cancel Task API +==== Works with the Cancel Task API Any update by query can be cancelled using the <>: @@ -452,7 +452,7 @@ that it has been cancelled and terminates itself. [float] [[docs-update-by-query-rethrottle]] -=== Rethrottling +==== Rethrottling The value of `requests_per_second` can be changed on a running update by query using the `_rethrottle` API: @@ -474,7 +474,7 @@ timeouts. [float] [[docs-update-by-query-slice]] -=== Slicing +==== Slicing Update by query supports <> to parallelize the updating process. 
This parallelization can improve efficiency and provide a convenient way to @@ -482,7 +482,7 @@ break the request down into smaller parts. [float] [[docs-update-by-query-manual-slice]] -==== Manual slicing +===== Manual slicing Slice an update by query manually by providing a slice id and total number of slices to each request: @@ -539,7 +539,7 @@ Which results in a sensible `total` like this one: [float] [[docs-update-by-query-automatic-slice]] -==== Automatic slicing +===== Automatic slicing You can also let update by query automatically parallelize using <> to slice on `_id`. Use `slices` to specify the number of @@ -612,7 +612,7 @@ though these are all taken at approximately the same time. [float] [[docs-update-by-query-picking-slices]] -===== Picking the number of slices +====== Picking the number of slices If slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices. If you're slicing manually or otherwise tuning @@ -632,7 +632,7 @@ documents being reindexed and cluster resources. [float] [[picking-up-a-new-property]] -=== Pick up a new property +==== Pick up a new property Say you created an index without dynamic mapping, filled it with data, and then added a mapping value to pick up more fields from the data: diff --git a/docs/reference/docs/update.asciidoc b/docs/reference/docs/update.asciidoc index 00cd66232190f..c0cc88bb4f5e7 100644 --- a/docs/reference/docs/update.asciidoc +++ b/docs/reference/docs/update.asciidoc @@ -1,12 +1,11 @@ [[docs-update]] -== Update API +=== Update API The update API allows to update a document based on a script provided. The operation gets the document (collocated with the shard) from the index, runs the script (with optional script language and parameters), and indexes back the result (also allows to delete, or ignore the -operation). It uses versioning to make sure no updates have happened -during the "get" and "reindex". +operation). Note, this operation still means full reindex of the document, it just removes some network roundtrips and reduces chances of version conflicts @@ -26,7 +25,7 @@ PUT test/_doc/1 // CONSOLE [float] -=== Scripted updates +==== Scripted updates Now, we can execute a script that would increment the counter: @@ -136,7 +135,7 @@ POST test/_update/1 // TEST[continued] [float] -=== Updates with a partial document +==== Updates with a partial document The update API also supports passing a partial document, which will be merged into the existing document (simple recursive merge, @@ -162,7 +161,7 @@ If both `doc` and `script` are specified, then `doc` is ignored. Best is to put your field pairs of the partial document in the script itself. [float] -=== Detecting noop updates +==== Detecting noop updates If `doc` is specified its value is merged with the existing `_source`. By default updates that don't change anything detect that they don't change anything and return `"result": "noop"` like this: @@ -195,6 +194,8 @@ the request was ignored. "_type": "_doc", "_id": "1", "_version": 7, + "_primary_term": 1, + "_seq_no": 6, "result": "noop" } -------------------------------------------------- @@ -217,7 +218,7 @@ POST test/_update/1 [[upserts]] [float] -=== Upserts +==== Upserts If the document does not already exist, the contents of the `upsert` element will be inserted as a new document. 
If the document does exist, then the @@ -244,7 +245,7 @@ POST test/_update/1 [float] [[scripted_upsert]] -==== `scripted_upsert` +===== `scripted_upsert` If you would like your script to run regardless of whether the document exists or not -- i.e. the script handles initializing the document instead of the @@ -274,7 +275,7 @@ POST sessions/_update/dh3sgudg8gsrgl [float] [[doc_as_upsert]] -==== `doc_as_upsert` +===== `doc_as_upsert` Instead of sending a partial `doc` plus an `upsert` doc, setting `doc_as_upsert` to `true` will use the contents of `doc` as the `upsert` @@ -294,7 +295,7 @@ POST test/_update/1 // TEST[continued] [float] -=== Parameters +==== Parameters The update operation supports the following query-string parameters: @@ -331,26 +332,7 @@ Control when the changes made by this request are visible to search. See Allows to control if and how the updated source should be returned in the response. By default the updated source is not returned. -See <> for details. - - -`version`:: - -The update API uses the Elasticsearch versioning support internally to make -sure the document doesn't change during the update. You can use the `version` -parameter to specify that the document should only be updated if its version -matches the one specified. - -[NOTE] -.The update API does not support versioning other than internal -===================================================== - -External (version types `external` and `external_gte`) or forced (version type `force`) -versioning is not supported by the update API as it would result in Elasticsearch -version numbers being out of sync with the external system. Use the -<> instead. - -===================================================== +See <> for details. `if_seq_no` and `if_primary_term`:: diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index 77b380dd1dfed..f68ebd05093b4 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -182,7 +182,7 @@ If everything goes well with installation, you should see a bunch of messages th -------------------------------------------------- -Without going too much into detail, we can see that our node named "6-bjhwl" (which will be a different set of characters in your case) has started and elected itself as a master in a single cluster. Don't worry yet at the moment what master means. The main thing that is important here is that we have started one node within one cluster. +Without going too much into detail, we can see that our node named "localhost.localdomain" has started and elected itself as a master in a single cluster. Don't worry yet at the moment what master means. The main thing that is important here is that we have started one node within one cluster. As mentioned previously, we can override either the cluster or node name. This can be done from the command line when starting Elasticsearch as follows: @@ -715,14 +715,14 @@ As for the response, we see the following parts: ** `hits.total.relation` - whether `hits.total.value` is the exact hit count, in which case it is equal to `"eq"` or a lower bound of the total hit count (greater than or equals), in which case it is equal to `gte`. 
* `hits.hits` – actual array of search results (defaults to first 10 documents)
-* `hits.sort` - sort key for results (missing if sorting by score)
+* `hits.sort` - sort value of the sort key for each result (missing if sorting by score)
* `hits._score` and `max_score` - ignore these fields for now

The accuracy of `hits.total` is controlled by the request parameter `track_total_hits`; when set to true the request will track the total hits accurately (`"relation": "eq"`). It defaults to `10,000`, which means that the total hit count is accurately tracked up to `10,000` documents. You can force an accurate count by setting `track_total_hits` to true explicitly.

-See the <> documentation
+See the <> documentation
for more details.

Here is the same exact search above using the alternative request body method:

diff --git a/docs/reference/how-to/general.asciidoc b/docs/reference/how-to/general.asciidoc
index b78ef63b4c816..9633c7fe843f2 100644
--- a/docs/reference/how-to/general.asciidoc
+++ b/docs/reference/how-to/general.asciidoc
@@ -9,7 +9,7 @@ Elasticsearch is designed as a search engine, which makes it very good at getting back the top documents that match a query. However, it is not as good for workloads that fall into the database domain, such as retrieving all documents that match a particular query. If you need to do this, make sure to
-use the <> API.
+use the <> API.

[float]
[[maximum-document-size]]
@@ -27,7 +27,7 @@ needs to fetch the `_id` of the document in all cases, and the cost of getting this field is bigger for large documents due to how the filesystem cache works. Indexing this document can use an amount of memory that is a multiplier of the original size of the document. Proximity search (phrase queries for instance)
-and <> also become more expensive
+and <> also become more expensive
since their cost directly depends on the size of the original document. It is sometimes useful to reconsider what the unit of information should be.

diff --git a/docs/reference/how-to/indexing-speed.asciidoc b/docs/reference/how-to/indexing-speed.asciidoc
index 147d3a557c830..f4c224829a456 100644
--- a/docs/reference/how-to/indexing-speed.asciidoc
+++ b/docs/reference/how-to/indexing-speed.asciidoc
@@ -47,7 +47,9 @@ By default, Elasticsearch runs this operation every second, but only on indices that have received one search request or more in the last 30 seconds. This is the optimal configuration if you have no or very little search traffic (e.g. less than one search request every 5 minutes) and want to optimize for
-indexing speed.
+indexing speed. This behavior aims to automatically optimize bulk indexing in
+the default case when no searches are performed. In order to opt out of this
+behavior, set the refresh interval explicitly.

On the other hand, if your index experiences regular search requests, this default behavior means that Elasticsearch will refresh your index every 1

diff --git a/docs/reference/how-to/recipes/scoring.asciidoc b/docs/reference/how-to/recipes/scoring.asciidoc
index 25425277839f7..90bff3098571a 100644
--- a/docs/reference/how-to/recipes/scoring.asciidoc
+++ b/docs/reference/how-to/recipes/scoring.asciidoc
@@ -29,7 +29,7 @@ are different too. The recommended way to work around this issue is to use a string that identifies the user that is logged in (a user id or session id for instance) as a
-<>. This ensures that all queries of a
+<>. This ensures that all queries of a
given user are always going to hit the same shards, so scores remain more consistent across queries.
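A sketch of what this looks like in practice, assuming a hypothetical session id of `xyzabc123` (the index name and query are illustrative):

[source,js]
--------------------------------------------------
GET /my-index/_search?preference=xyzabc123
{
  "query": {
    "match": {
      "title": "elasticsearch"
    }
  }
}
--------------------------------------------------
// NOTCONSOLE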
diff --git a/docs/reference/how-to/search-speed.asciidoc b/docs/reference/how-to/search-speed.asciidoc
index e09b18df20c5d..9c445a148db10 100644
--- a/docs/reference/how-to/search-speed.asciidoc
+++ b/docs/reference/how-to/search-speed.asciidoc
@@ -299,13 +299,17 @@ leveraging the query cache.
[float]
=== Force-merge read-only indices

-Indices that are read-only would benefit from being
-<>. This is typically the
-case with time-based indices: only the index for the current time frame is
-getting new documents while older indices are read-only.
-
-IMPORTANT: Don't force-merge indices that are still being written to -- leave
-merging to the background merge process.
+Indices that are read-only may benefit from being <>. This is typically the case with time-based indices:
+only the index for the current time frame is getting new documents while older
+indices are read-only. Shards that have been force-merged into a single segment
+can use simpler and more efficient data structures to perform searches.
+
+IMPORTANT: Do not force-merge indices to which you are still writing, or to
+which you will write again in the future. Instead, rely on the automatic
+background merge process to perform merges as needed to keep the index running
+smoothly. If you continue to write to a force-merged index then its performance
+may become much worse.

[float]
=== Warm up global ordinals

@@ -315,7 +319,8 @@ Global ordinals are a data structure that is used in order to run <> fields. They are loaded lazily in memory because Elasticsearch does not know which fields will be used in `terms` aggregations and which fields won't. You can tell Elasticsearch to load global ordinals
-eagerly at refresh-time by configuring mappings as described below:
+eagerly when starting or refreshing a shard by configuring mappings as
+described below:

[source,js]
--------------------------------------------------

diff --git a/docs/reference/ilm/apis/explain.asciidoc b/docs/reference/ilm/apis/explain.asciidoc
index 26c2cb2d26b75..85855d18beae6 100644
--- a/docs/reference/ilm/apis/explain.asciidoc
+++ b/docs/reference/ilm/apis/explain.asciidoc
@@ -98,12 +98,13 @@ that the index is managed and in the `new` phase:
"managed": true, <1>
"policy": "my_policy", <2>
"lifecycle_date_millis": 1538475653281, <3>
+ "age": "15s", <4>
"phase": "new",
- "phase_time_millis": 1538475653317, <4>
+ "phase_time_millis": 1538475653317, <5>
"action": "complete",
- "action_time_millis": 1538475653317, <5>
+ "action_time_millis": 1538475653317, <6>
"step": "complete",
- "step_time_millis": 1538475653317 <6>
+ "step_time_millis": 1538475653317 <7>
} } }
@@ -114,9 +115,10 @@ that the index is managed and in the `new` phase:
ILM the other fields will not be shown
<2> The name of the policy which ILM is using for this index
<3> The timestamp used for the `min_age`
-<4> When the index entered the current phase
-<5> When the index entered the current action
-<6> When the index entered the current step
+<4> The age of the index (used for calculating when to enter the next phase)
+<5> When the index entered the current phase
+<6> When the index entered the current action
+<7> When the index entered the current step

Once the policy is running on the index, the response includes a `phase_execution` object that shows the definition of the current phase.
@@ -133,6 +135,7 @@ phase completes.
"policy": "my_lifecycle3",
"lifecycle_date_millis": 1538475653281,
"lifecycle_date": "2018-10-15T13:45:21.981Z",
+ "age": "25.14s",
"phase": "hot",
"phase_time_millis": 1538475653317,
"phase_time": "2018-10-15T13:45:22.577Z",
@@ -181,6 +184,7 @@ information for the step that's being performed on the index.
"policy": "my_lifecycle3",
"lifecycle_date_millis": 1538475653281,
"lifecycle_date": "2018-10-15T13:45:21.981Z",
+ "age": "4.12m",
"phase": "warm",
"phase_time_millis": 1538475653317,
"phase_time": "2018-10-15T13:45:22.577Z",
@@ -241,6 +245,7 @@ the step that failed and the step info provides information about the error.
"policy": "my_lifecycle3",
"lifecycle_date_millis": 1538475653281,
"lifecycle_date": "2018-10-15T13:45:21.981Z",
+ "age": "50.1d",
"phase": "hot",
"phase_time_millis": 1538475653317,
"phase_time": "2018-10-15T13:45:22.577Z",
diff --git a/docs/reference/ilm/apis/slm-api.asciidoc b/docs/reference/ilm/apis/slm-api.asciidoc
new file mode 100644
index 0000000000000..bb175d67317c2
--- /dev/null
+++ b/docs/reference/ilm/apis/slm-api.asciidoc
@@ -0,0 +1,350 @@
+[role="xpack"]
+[testenv="basic"]
+[[snapshot-lifecycle-management-api]]
+== Snapshot lifecycle management API
+
+The Snapshot Lifecycle Management APIs are used to manage policies for the time
+and frequency of automatic snapshots. Snapshot Lifecycle Management is related
+to <>; however, instead
+of managing a lifecycle of actions that are performed on a single index, SLM
+allows configuring policies spanning multiple indices.
+
+SLM policy management is split into three different CRUD APIs: a way to put or update
+policies, a way to retrieve policies, and a way to delete unwanted policies, as
+well as a separate API for immediately invoking a snapshot based on a policy.
+
+Since SLM falls under the same category as ILM, it is stopped and started by
+using the <> ILM APIs.
+
+[[slm-api-put]]
+=== Put Snapshot Lifecycle Policy API
+
+Creates or updates a snapshot policy. If the policy already exists, the version
+is incremented. Only the latest version of a policy is stored.
+
+When a policy is created, it is immediately scheduled based on its schedule;
+when a policy is updated, its schedule changes are applied immediately.
+
+==== Path Parameters
+
+`policy_id` (required)::
+  (string) Identifier (id) for the policy.
+
+==== Request Parameters
+
+include::{docdir}/rest-api/timeoutparms.asciidoc[]
+
+==== Authorization
+
+You must have the `manage_slm` cluster privilege to use this API. You must also
+have the `manage` index privilege on all indices being managed by `policy`. All
+operations executed by {slm} for a policy are executed as the user that put the
+latest version of a policy. For more information, see
+{stack-ov}/security-privileges.html[Security Privileges].
+
+==== Example
+
+The following creates a snapshot lifecycle policy with an id of
+`daily-snapshots`:
+
+[source,js]
+--------------------------------------------------
+PUT /_slm/policy/daily-snapshots
+{
+  "schedule": "0 30 1 * * ?", <1>
+  "name": "", <2>
+  "repository": "my_repository", <3>
+  "config": { <4>
+    "indices": ["data-*", "important"], <5>
+    "ignore_unavailable": false,
+    "include_global_state": false
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:setup-repository]
+<1> When the snapshot should be taken, in this case, 1:30am daily
+<2> The name each snapshot should be given
+<3> Which repository to take the snapshot in
+<4> Any extra snapshot configuration
+<5> Which indices the snapshot should contain
+
+The top-level keys that the policy supports are described below:
+
+|==================
+| Key | Description
+
+| `schedule` | A periodic or absolute time schedule. Supports all values
+  supported by the cron scheduler:
+  {xpack-ref}/trigger-schedule.html#schedule-cron[Cron scheduler configuration]
+
+| `name` | A name automatically given to each snapshot performed by this policy.
+  Supports the same <> supported in index
+  names. A UUID is automatically appended to the end of the name to prevent
+  conflicting snapshot names.
+
+| `repository` | The snapshot repository that will contain snapshots created by
+  this policy. The repository must exist prior to the policy's creation and can
+  be created with the <>.
+
+| `config` | Configuration for each snapshot that will be created by this
+  policy. Any configuration is included with <> issued by this policy.
+|==================
+
+To update an existing policy, use the put snapshot lifecycle policy API
+with the id of the policy you want to update.
+
+[[slm-api-get]]
+=== Get Snapshot Lifecycle Policy API
+
+Once a policy is in place, you can retrieve one or more of the policies using
+the get snapshot lifecycle policy API. The response also includes information
+about each policy's latest successful and latest failed snapshot attempts.
+
+==== Path Parameters
+
+`policy_ids` (optional)::
+  (string) Comma-separated ids of policies to retrieve.
+
+==== Examples
+
+To retrieve a policy, perform a `GET` with the policy's id:
+
+[source,js]
+--------------------------------------------------
+GET /_slm/policy/daily-snapshots?human
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+The output looks similar to the following:
+
+[source,js]
+--------------------------------------------------
+{
+  "daily-snapshots" : {
+    "version": 1, <1>
+    "modified_date": "2019-04-23T01:30:00.000Z", <2>
+    "modified_date_millis": 1556048137314,
+    "policy" : {
+      "schedule": "0 30 1 * * ?",
+      "name": "",
+      "repository": "my_repository",
+      "config": {
+        "indices": ["data-*", "important"],
+        "ignore_unavailable": false,
+        "include_global_state": false
+      }
+    },
+    "next_execution": "2019-04-24T01:30:00.000Z", <3>
+    "next_execution_millis": 1556048160000
+  }
+}
+--------------------------------------------------
+// TESTRESPONSE[s/"modified_date": "2019-04-23T01:30:00.000Z"/"modified_date": $body.daily-snapshots.modified_date/ s/"modified_date_millis": 1556048137314/"modified_date_millis": $body.daily-snapshots.modified_date_millis/ s/"next_execution": "2019-04-24T01:30:00.000Z"/"next_execution": $body.daily-snapshots.next_execution/ s/"next_execution_millis": 1556048160000/"next_execution_millis": $body.daily-snapshots.next_execution_millis/]
+<1> The version of the snapshot policy; only the latest version is stored, and it is incremented when the policy is updated
+<2> The last time this policy was modified
+<3> The next time this policy will be executed
+
+Or, to retrieve all policies:
+
+[source,js]
+--------------------------------------------------
+GET /_slm/policy
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+[[slm-api-execute]]
+=== Execute Snapshot Lifecycle Policy API
+
+Sometimes it can be useful to immediately execute a snapshot based on a policy,
+perhaps before an upgrade or before performing other maintenance on indices. The
+execute snapshot policy API allows you to perform a snapshot immediately without
+waiting for a policy's scheduled invocation.
+
+==== Path Parameters
+
+`policy_id` (required)::
+  (string) Id of the policy to execute.
+
+==== Example
+
+To take an immediate snapshot using a policy, use the following request:
+
+[source,js]
+--------------------------------------------------
+PUT /_slm/policy/daily-snapshots/_execute
+--------------------------------------------------
+// CONSOLE
+// TEST[skip:we can't easily handle snapshots from docs tests]
+
+This API returns immediately with the generated snapshot name:
+
+[source,js]
+--------------------------------------------------
+{
+  "snapshot_name": "daily-snap-2019.04.24-gwrqoo2xtea3q57vvg0uea"
+}
+--------------------------------------------------
+// TESTRESPONSE[skip:we can't handle snapshots from docs tests]
+
+The snapshot is taken in the background; you can use the
+<> to monitor the status of the snapshot.
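+For instance, a sketch of listing the snapshots in the repository used by the
+running example (the repository name `my_repository` comes from the policy above):
+
+[source,js]
+--------------------------------------------------
+GET /_snapshot/my_repository/_all
+--------------------------------------------------
+// NOTCONSOLE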
+ +Once a snapshot has been kicked off, you can see the latest successful or failed +snapshot using the get snapshot lifecycle policy API: + +[source,js] +-------------------------------------------------- +GET /_slm/policy/daily-snapshots?human +-------------------------------------------------- +// CONSOLE +// TEST[skip:we already tested get policy above, the last_failure may not be present though] + +Which, in this case shows an error because the index did not exist: + +[source,js] +-------------------------------------------------- +{ + "daily-snapshots" : { + "version": 1, + "modified_date": "2019-04-23T01:30:00.000Z", + "modified_date_millis": 1556048137314, + "policy" : { + "schedule": "0 30 1 * * ?", + "name": "", + "repository": "my_repository", + "config": { + "indices": ["data-*", "important"], + "ignore_unavailable": false, + "include_global_state": false + } + }, + "last_failure": { <1> + "snapshot_name": "daily-snap-2019.04.02-lohisb5ith2n8hxacaq3mw", + "time_string": "2019-04-02T01:30:00.000Z", + "time": 1556042030000, + "details": "{\"type\":\"index_not_found_exception\",\"reason\":\"no such index [important]\",\"resource.type\":\"index_or_alias\",\"resource.id\":\"important\",\"index_uuid\":\"_na_\",\"index\":\"important\",\"stack_trace\":\"[important] IndexNotFoundException[no such index [important]]\\n\\tat org.elasticsearch.cluster.metadata.IndexNameExpressionResolver$WildcardExpressionResolver.indexNotFoundException(IndexNameExpressionResolver.java:762)\\n\\tat org.elasticsearch.cluster.metadata.IndexNameExpressionResolver$WildcardExpressionResolver.innerResolve(IndexNameExpressionResolver.java:714)\\n\\tat org.elasticsearch.cluster.metadata.IndexNameExpressionResolver$WildcardExpressionResolver.resolve(IndexNameExpressionResolver.java:670)\\n\\tat org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndices(IndexNameExpressionResolver.java:163)\\n\\tat org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndexNames(IndexNameExpressionResolver.java:142)\\n\\tat org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndexNames(IndexNameExpressionResolver.java:102)\\n\\tat org.elasticsearch.snapshots.SnapshotsService$1.execute(SnapshotsService.java:280)\\n\\tat org.elasticsearch.cluster.ClusterStateUpdateTask.execute(ClusterStateUpdateTask.java:47)\\n\\tat org.elasticsearch.cluster.service.MasterService.executeTasks(MasterService.java:687)\\n\\tat org.elasticsearch.cluster.service.MasterService.calculateTaskOutputs(MasterService.java:310)\\n\\tat org.elasticsearch.cluster.service.MasterService.runTasks(MasterService.java:210)\\n\\tat org.elasticsearch.cluster.service.MasterService$Batcher.run(MasterService.java:142)\\n\\tat org.elasticsearch.cluster.service.TaskBatcher.runIfNotProcessed(TaskBatcher.java:150)\\n\\tat org.elasticsearch.cluster.service.TaskBatcher$BatchedTask.run(TaskBatcher.java:188)\\n\\tat org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:688)\\n\\tat org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:252)\\n\\tat org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:215)\\n\\tat java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)\\n\\tat 
+      "details": "{\"type\":\"index_not_found_exception\",\"reason\":\"no such index [important]\",\"resource.type\":\"index_or_alias\",\"resource.id\":\"important\",\"index_uuid\":\"_na_\",\"index\":\"important\",\"stack_trace\":\"[important] IndexNotFoundException[no such index [important]]\\n\\tat org.elasticsearch.cluster.metadata.IndexNameExpressionResolver$WildcardExpressionResolver.indexNotFoundException(IndexNameExpressionResolver.java:762)\\n\\tat org.elasticsearch.cluster.metadata.IndexNameExpressionResolver$WildcardExpressionResolver.innerResolve(IndexNameExpressionResolver.java:714)\\n\\tat org.elasticsearch.cluster.metadata.IndexNameExpressionResolver$WildcardExpressionResolver.resolve(IndexNameExpressionResolver.java:670)\\n\\tat org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndices(IndexNameExpressionResolver.java:163)\\n\\tat org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndexNames(IndexNameExpressionResolver.java:142)\\n\\tat org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndexNames(IndexNameExpressionResolver.java:102)\\n\\tat org.elasticsearch.snapshots.SnapshotsService$1.execute(SnapshotsService.java:280)\\n\\tat org.elasticsearch.cluster.ClusterStateUpdateTask.execute(ClusterStateUpdateTask.java:47)\\n\\tat org.elasticsearch.cluster.service.MasterService.executeTasks(MasterService.java:687)\\n\\tat org.elasticsearch.cluster.service.MasterService.calculateTaskOutputs(MasterService.java:310)\\n\\tat org.elasticsearch.cluster.service.MasterService.runTasks(MasterService.java:210)\\n\\tat org.elasticsearch.cluster.service.MasterService$Batcher.run(MasterService.java:142)\\n\\tat org.elasticsearch.cluster.service.TaskBatcher.runIfNotProcessed(TaskBatcher.java:150)\\n\\tat org.elasticsearch.cluster.service.TaskBatcher$BatchedTask.run(TaskBatcher.java:188)\\n\\tat org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:688)\\n\\tat org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:252)\\n\\tat org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:215)\\n\\tat java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)\\n\\tat java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)\\n\\tat java.base/java.lang.Thread.run(Thread.java:834)\\n\"}"
+    },
+    "next_execution": "2019-04-24T01:30:00.000Z",
+    "next_execution_millis": 1556048160000
+  }
+}
+--------------------------------------------------
+// TESTRESPONSE[skip:the presence of last_failure is asynchronous and will be present for users, but is untestable]
+<1> The most recent snapshot this policy failed to initiate, along with the details of its failure
+
+In this case, the snapshot failed because the "important" index did not exist
+and the `ignore_unavailable` setting was set to `false`.
+
+Updating the policy to change the `ignore_unavailable` setting is done using the
+same put snapshot lifecycle policy API:
+
+[source,js]
+--------------------------------------------------
+PUT /_slm/policy/daily-snapshots
+{
+  "schedule": "0 30 1 * * ?",
+  "name": "",
+  "repository": "my_repository",
+  "config": {
+    "indices": ["data-*", "important"],
+    "ignore_unavailable": true,
+    "include_global_state": false
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+Another snapshot can immediately be executed to ensure the new policy works:
+
+[source,js]
+--------------------------------------------------
+PUT /_slm/policy/daily-snapshots/_execute
+--------------------------------------------------
+// CONSOLE
+// TEST[skip:we can't handle snapshots in docs tests]
+
+[source,js]
+--------------------------------------------------
+{
+  "snapshot_name": "daily-snap-2019.04.24-tmtnyjtrsxkhbrrdcgg18a"
+}
+--------------------------------------------------
+// TESTRESPONSE[skip:we can't handle snapshots in docs tests]
+
+Now retrieving the policy shows that it has been executed successfully:
+
+[source,js]
+--------------------------------------------------
+GET /_slm/policy/daily-snapshots?human
+--------------------------------------------------
+// CONSOLE
+// TEST[skip:we already tested this above and the output may not be available yet]
+
+Which now includes the successful snapshot information:
+
+[source,js]
+--------------------------------------------------
+{
+  "daily-snapshots" : {
+    "version": 2, <1>
+    "modified_date": "2019-04-23T01:30:00.000Z",
+    "modified_date_millis": 1556048137314,
+    "policy" : {
+      "schedule": "0 30 1 * * ?",
+      "name": "",
+      "repository": "my_repository",
+      "config": {
+        "indices": ["data-*", "important"],
+        "ignore_unavailable": true,
+        "include_global_state": false
+      }
+    },
+    "last_success": { <2>
+      "snapshot_name": "daily-snap-2019.04.24-tmtnyjtrsxkhbrrdcgg18a",
+      "time_string": "2019-04-24T16:43:49.316Z",
+      "time": 1556124229316
+    },
+    "last_failure": {
+      "snapshot_name": "daily-snap-2019.04.02-lohisb5ith2n8hxacaq3mw",
+      "time_string": "2019-04-02T01:30:00.000Z",
+      "time": 1556042030000,
+      "details": "{\"type\":\"index_not_found_exception\",\"reason\":\"no such index [important]\",\"resource.type\":\"index_or_alias\",\"resource.id\":\"important\",\"index_uuid\":\"_na_\",\"index\":\"important\",\"stack_trace\":\"[important] IndexNotFoundException[no such index [important]]\\n\\tat org.elasticsearch.cluster.metadata.IndexNameExpressionResolver$WildcardExpressionResolver.indexNotFoundException(IndexNameExpressionResolver.java:762)\\n\\tat org.elasticsearch.cluster.metadata.IndexNameExpressionResolver$WildcardExpressionResolver.innerResolve(IndexNameExpressionResolver.java:714)\\n\\tat org.elasticsearch.cluster.metadata.IndexNameExpressionResolver$WildcardExpressionResolver.resolve(IndexNameExpressionResolver.java:670)\\n\\tat org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndices(IndexNameExpressionResolver.java:163)\\n\\tat org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndexNames(IndexNameExpressionResolver.java:142)\\n\\tat org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndexNames(IndexNameExpressionResolver.java:102)\\n\\tat org.elasticsearch.snapshots.SnapshotsService$1.execute(SnapshotsService.java:280)\\n\\tat org.elasticsearch.cluster.ClusterStateUpdateTask.execute(ClusterStateUpdateTask.java:47)\\n\\tat org.elasticsearch.cluster.service.MasterService.executeTasks(MasterService.java:687)\\n\\tat org.elasticsearch.cluster.service.MasterService.calculateTaskOutputs(MasterService.java:310)\\n\\tat org.elasticsearch.cluster.service.MasterService.runTasks(MasterService.java:210)\\n\\tat org.elasticsearch.cluster.service.MasterService$Batcher.run(MasterService.java:142)\\n\\tat org.elasticsearch.cluster.service.TaskBatcher.runIfNotProcessed(TaskBatcher.java:150)\\n\\tat org.elasticsearch.cluster.service.TaskBatcher$BatchedTask.run(TaskBatcher.java:188)\\n\\tat org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:688)\\n\\tat org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:252)\\n\\tat org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:215)\\n\\tat java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)\\n\\tat java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)\\n\\tat java.base/java.lang.Thread.run(Thread.java:834)\\n\"}"
+    },
+    "next_execution": "2019-04-24T01:30:00.000Z",
+    "next_execution_millis": 1556048160000
+  }
+}
+--------------------------------------------------
+// TESTRESPONSE[skip:the presence of last_failure and last_success is asynchronous and will be present for users, but is untestable]
+<1> The policy's version has been incremented because it was updated
+<2> Information about the last successfully initiated snapshot
+
+It is a good idea to test policies using the execute API to ensure they work.
+
+[[slm-api-delete]]
+=== Delete Snapshot Lifecycle Policy API
+
+A policy can be deleted by issuing a delete request with the policy id. Note
+that this prevents any future snapshots from being taken, but does not cancel
+any currently ongoing snapshots or remove any previously taken snapshots.
+
+==== Path Parameters
+
+`policy_id` (required)::
+  (string) Id of the policy to remove.
+ +==== Example + +[source,js] +-------------------------------------------------- +DELETE /_slm/policy/daily-snapshots +-------------------------------------------------- +// CONSOLE +// TEST[continued] diff --git a/docs/reference/ilm/error-handling.asciidoc b/docs/reference/ilm/error-handling.asciidoc index abe643255bf95..831376d68d28b 100644 --- a/docs/reference/ilm/error-handling.asciidoc +++ b/docs/reference/ilm/error-handling.asciidoc @@ -76,20 +76,21 @@ Which returns the following information: "managed" : true, <1> "policy" : "shrink-the-index", <2> "lifecycle_date_millis" : 1541717265865, - "phase" : "warm", <3> + "age": "5.1d", <3> + "phase" : "warm", <4> "phase_time_millis" : 1541717272601, - "action" : "shrink", <4> + "action" : "shrink", <5> "action_time_millis" : 1541717272601, - "step" : "ERROR", <5> + "step" : "ERROR", <6> "step_time_millis" : 1541717272688, - "failed_step" : "shrink", <6> + "failed_step" : "shrink", <7> "step_info" : { - "type" : "illegal_argument_exception", <7> - "reason" : "the number of target shards [4] must be less that the number of source shards [2]" <8> + "type" : "illegal_argument_exception", <8> + "reason" : "the number of target shards [4] must be less that the number of source shards [2]" <9> }, "phase_execution" : { "policy" : "shrink-the-index", - "phase_definition" : { <9> + "phase_definition" : { <10> "min_age" : "5d", "actions" : { "shrink" : { @@ -108,13 +109,14 @@ Which returns the following information: // TESTRESPONSE[skip:no way to know if we will get this response immediately] <1> this index is managed by ILM <2> the policy in question, in this case, "shrink-the-index" -<3> what phase the index is currently in -<4> what action the index is currently on -<5> what step the index is currently on, in this case, because there is an error, the index is in the "ERROR" step -<6> the name of the step that failed to execute, in this case "shrink" -<7> the error class that occurred during this step -<8> the error message that occurred during the execution failure -<9> the definition of the phase (in this case, the "warm" phase) that the index is currently on +<3> the current age for the index +<4> what phase the index is currently in +<5> what action the index is currently on +<6> what step the index is currently on, in this case, because there is an error, the index is in the "ERROR" step +<7> the name of the step that failed to execute, in this case "shrink" +<8> the error class that occurred during this step +<9> the error message that occurred during the execution failure +<10> the definition of the phase (in this case, the "warm" phase) that the index is currently on The index here has been moved to the error step because the shrink definition in the policy is using an incorrect number of shards. So rectifying that in the diff --git a/docs/reference/ilm/getting-started-ilm.asciidoc b/docs/reference/ilm/getting-started-ilm.asciidoc index 3d1935721643b..f74e49fe11222 100644 --- a/docs/reference/ilm/getting-started-ilm.asciidoc +++ b/docs/reference/ilm/getting-started-ilm.asciidoc @@ -173,15 +173,16 @@ managed indices. 
"managed": true, <1> "policy": "datastream_policy", <2> "lifecycle_date_millis": 1538475653281, - "phase": "hot", <3> + "age": "30s", <3> + "phase": "hot", <4> "phase_time_millis": 1538475653317, - "action": "rollover", <4> + "action": "rollover", <5> "action_time_millis": 1538475653317, - "step": "attempt-rollover", <5> + "step": "attempt-rollover", <6> "step_time_millis": 1538475653317, "phase_execution": { "policy": "datastream_policy", - "phase_definition": { <6> + "phase_definition": { <7> "min_age": "0ms", "actions": { "rollover": { @@ -190,7 +191,7 @@ managed indices. } } }, - "version": 1, <7> + "version": 1, <8> "modified_date_in_millis": 1539609701576 } } @@ -201,12 +202,13 @@ managed indices. // TESTRESPONSE[skip:no way to know if we will get this response immediately] <1> this index is managed by ILM <2> the policy in question, in this case, "datastream_policy" -<3> what phase the index is currently in -<4> what action the index is currently on -<5> what step the index is currently on -<6> the definition of the phase +<3> the current age of the index +<4> what phase the index is currently in +<5> what action the index is currently on +<6> what step the index is currently on +<7> the definition of the phase (in this case, the "hot" phase) that the index is currently on -<7> the version of the policy being used to execute the current phase +<8> the version of the policy being used to execute the current phase You can read about the full details of this response in the <>. For now, let's focus on how diff --git a/docs/reference/ilm/getting-started-slm.asciidoc b/docs/reference/ilm/getting-started-slm.asciidoc new file mode 100644 index 0000000000000..5849101ffe6c3 --- /dev/null +++ b/docs/reference/ilm/getting-started-slm.asciidoc @@ -0,0 +1,215 @@ +[role="xpack"] +[testenv="basic"] +[[getting-started-snapshot-lifecycle-management]] +== Getting started with snapshot lifecycle management + +Let's get started with snapshot lifecycle management (SLM) by working through a +hands-on scenario. The goal of this example is to automatically back up {es} +indices using the <> every day at a particular +time. + +[float] +[[slm-and-security]] +=== Security and SLM +Before starting, it's important to understand the privileges that are needed +when configuring SLM if you are using the security plugin. There are two +built-in cluster privileges that can be used to assist: `manage_slm` and +`read_slm`. It's also good to note that the `create_snapshot` permission +allows taking snapshots even for indices the role may not have access to. 
+
+An example of configuring an administrator role for SLM follows:
+
+[source,js]
+-----------------------------------
+POST /_security/role/slm-admin
+{
+  "cluster": ["manage_slm", "create_snapshot"],
+  "indices": [
+    {
+      "names": [".slm-history-*"],
+      "privileges": ["all"]
+    }
+  ]
+}
+-----------------------------------
+// CONSOLE
+// TEST[skip:security is not enabled here]
+
+Or, for a read-only role that can retrieve policies (but not update, execute, or
+delete them) and can only view the history index:
+
+[source,js]
+-----------------------------------
+POST /_security/role/slm-read-only
+{
+  "cluster": ["read_slm"],
+  "indices": [
+    {
+      "names": [".slm-history-*"],
+      "privileges": ["read"]
+    }
+  ]
+}
+-----------------------------------
+// CONSOLE
+// TEST[skip:security is not enabled here]
+
+[float]
+[[slm-gs-create-policy]]
+=== Setting up a repository
+
+Before we can set up an SLM policy, we'll need to register a
+<> where the snapshots will be
+stored. Repositories can use {plugins}/repository.html[many different backends],
+including cloud storage providers. You'll probably want to use one of these in
+production, but for this example we'll use a shared file system repository:
+
+[source,js]
+-----------------------------------
+PUT /_snapshot/my_repository
+{
+  "type": "fs",
+  "settings": {
+    "location": "my_backup_location"
+  }
+}
+-----------------------------------
+// CONSOLE
+// TEST
+
+[float]
+=== Setting up a policy
+
+Now that we have a repository in place, we can create a policy to automatically
+take snapshots. Policies are written in JSON and will define when to take
+snapshots, what the snapshots should be named, and which indices should be
+included, among other things. We'll use the <> API
+to create the policy.
+
+[source,js]
+--------------------------------------------------
+PUT /_slm/policy/nightly-snapshots
+{
+  "schedule": "0 30 1 * * ?", <1>
+  "name": "", <2>
+  "repository": "my_repository", <3>
+  "config": { <4>
+    "indices": ["*"] <5>
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+<1> when the snapshot should be taken, using
+    {xpack-ref}/trigger-schedule.html#schedule-cron[Cron syntax], in this
+    case at 1:30AM each day
+<2> the name each snapshot should be given, using
+    <> to include the current date in the name
+    of the snapshot
+<3> the repository the snapshot should be stored in
+<4> the configuration to be used for the snapshot requests (see below)
+<5> which indices should be included in the snapshot, in this case, every index
+
+This policy will take a snapshot of every index each day at 1:30AM UTC.
+Snapshots are incremental, allowing frequent snapshots to be stored efficiently,
+so don't be afraid to configure a policy to take frequent snapshots.
+
+In addition to specifying the indices that should be included in the snapshot,
+the `config` field can be used to customize other aspects of the snapshot. You
+can use any option allowed in <>, so you can specify, for example, whether the snapshot should fail in
+special cases, such as if one of the specified indices cannot be found.
+
+[float]
+=== Making sure the policy works
+
+While snapshots taken by SLM policies can be viewed through the standard snapshot
+API, SLM also keeps track of policy successes and failures in ways that make it
+easier to check that the policy is working.
+Once a policy has executed at least once, when you view the policy using the
+<>, some metadata will be returned indicating whether the
+snapshot was successfully initiated or not.
+
+Instead of waiting for our policy to run, let's tell SLM to take a snapshot
+using the configuration from our policy right now, rather than waiting for
+1:30AM.
+
+[source,js]
+--------------------------------------------------
+PUT /_slm/policy/nightly-snapshots/_execute
+--------------------------------------------------
+// CONSOLE
+// TEST[skip:we can't easily handle snapshots from docs tests]
+
+This request will kick off a snapshot for our policy right now, regardless of
+the schedule in the policy. This is useful for taking snapshots before making
+a configuration change, upgrading, or for our purposes, making sure our policy
+is going to work successfully. The policy will continue to run on its configured
+schedule after this manually triggered execution.
+
+[source,js]
+--------------------------------------------------
+GET /_slm/policy/nightly-snapshots?human
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+This request will return a response that includes the policy, information about
+the last time the policy succeeded and failed, and the next time the policy
+will be executed.
+
+[source,js]
+--------------------------------------------------
+{
+  "nightly-snapshots" : {
+    "version": 1,
+    "modified_date": "2019-04-23T01:30:00.000Z",
+    "modified_date_millis": 1556048137314,
+    "policy" : {
+      "schedule": "0 30 1 * * ?",
+      "name": "",
+      "repository": "my_repository",
+      "config": {
+        "indices": ["*"]
+      }
+    },
+    "last_success": { <1>
+      "snapshot_name": "nightly-snap-2019.04.24-tmtnyjtrsxkhbrrdcgg18a", <2>
+      "time_string": "2019-04-24T16:43:49.316Z",
+      "time": 1556124229316
+    },
+    "last_failure": { <3>
+      "snapshot_name": "nightly-snap-2019.04.02-lohisb5ith2n8hxacaq3mw",
+      "time_string": "2019-04-02T01:30:00.000Z",
+      "time": 1556042030000,
+      "details": "{\"type\":\"index_not_found_exception\",\"reason\":\"no such index [important]\",\"resource.type\":\"index_or_alias\",\"resource.id\":\"important\",\"index_uuid\":\"_na_\",\"index\":\"important\",\"stack_trace\":\"[important] IndexNotFoundException[no such index [important]]\\n\\tat org.elasticsearch.cluster.metadata.IndexNameExpressionResolver$WildcardExpressionResolver.indexNotFoundException(IndexNameExpressionResolver.java:762)\\n\\tat org.elasticsearch.cluster.metadata.IndexNameExpressionResolver$WildcardExpressionResolver.innerResolve(IndexNameExpressionResolver.java:714)\\n\\tat org.elasticsearch.cluster.metadata.IndexNameExpressionResolver$WildcardExpressionResolver.resolve(IndexNameExpressionResolver.java:670)\\n\\tat org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndices(IndexNameExpressionResolver.java:163)\\n\\tat org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndexNames(IndexNameExpressionResolver.java:142)\\n\\tat org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndexNames(IndexNameExpressionResolver.java:102)\\n\\tat org.elasticsearch.snapshots.SnapshotsService$1.execute(SnapshotsService.java:280)\\n\\tat org.elasticsearch.cluster.ClusterStateUpdateTask.execute(ClusterStateUpdateTask.java:47)\\n\\tat org.elasticsearch.cluster.service.MasterService.executeTasks(MasterService.java:687)\\n\\tat org.elasticsearch.cluster.service.MasterService.calculateTaskOutputs(MasterService.java:310)\\n\\tat org.elasticsearch.cluster.service.MasterService.runTasks(MasterService.java:210)\\n\\tat org.elasticsearch.cluster.service.MasterService$Batcher.run(MasterService.java:142)\\n\\tat org.elasticsearch.cluster.service.TaskBatcher.runIfNotProcessed(TaskBatcher.java:150)\\n\\tat org.elasticsearch.cluster.service.TaskBatcher$BatchedTask.run(TaskBatcher.java:188)\\n\\tat org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:688)\\n\\tat org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:252)\\n\\tat org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:215)\\n\\tat java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)\\n\\tat java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)\\n\\tat java.base/java.lang.Thread.run(Thread.java:834)\\n\"}"
+    },
+    "next_execution": "2019-04-24T01:30:00.000Z", <4>
+    "next_execution_millis": 1556048160000
+  }
+}
+--------------------------------------------------
+// TESTRESPONSE[skip:the presence of last_failure and last_success is asynchronous and will be present for users, but is untestable]
+<1> information about the last time the policy successfully initiated a snapshot
+<2> the name of the snapshot that was successfully initiated
+<3> information about the last time the policy failed to initiate a snapshot
+<4> the next time the policy will execute
+
+NOTE: This metadata only indicates whether the request to initiate the snapshot
+was made successfully. After the snapshot has been successfully started, it is
+still possible for the snapshot to fail if, for example, the connection to a
+remote repository is lost while copying files.
+
+If you're following along, the returned SLM policy shouldn't have a `last_failure`
+field; it's included above only as an example. You should, however, see a
+`last_success` field and a snapshot name. If you do, you've successfully taken
+your first snapshot using SLM!
+
+While only the most recent success and failure are available through the Get Policy
+API, all policy executions are recorded to a history index, which may be queried
+by searching the index pattern `.slm-history*`.
+
+That's it! We have our first SLM policy set up to periodically take snapshots
+so that our backups are always up to date. You can read more details in the
+<> and the
+<>.
diff --git a/docs/reference/ilm/index.asciidoc b/docs/reference/ilm/index.asciidoc
index b906f9ade4447..50d2e5f6dac22 100644
--- a/docs/reference/ilm/index.asciidoc
+++ b/docs/reference/ilm/index.asciidoc
@@ -47,6 +47,16 @@ to a single shard.
hardware.
. Delete the index once the required 30 day retention period is reached.
+*Snapshot Lifecycle Management*
+
+ILM itself allows managing indices; however, managing snapshots for a set of
+indices is outside of the scope of an index-level policy. Instead, there are
+separate APIs for managing snapshot lifecycles. Please see the
+<>
+documentation for information about configuring snapshots.
+
+See <>.
+
[IMPORTANT]
===========================
{ilm} does not support mixed-version cluster usage.
Although it
@@ -73,3 +83,5 @@
include::error-handling.asciidoc[]
include::ilm-and-snapshots.asciidoc[]
include::start-stop-ilm.asciidoc[]
+
+include::getting-started-slm.asciidoc[]
diff --git a/docs/reference/ilm/start-stop-ilm.asciidoc b/docs/reference/ilm/start-stop-ilm.asciidoc
index 22ca0ae48fd98..fd1ab654ab6cc 100644
--- a/docs/reference/ilm/start-stop-ilm.asciidoc
+++ b/docs/reference/ilm/start-stop-ilm.asciidoc
@@ -10,6 +10,10 @@ maybe there are scheduled maintenance windows when cluster topology changes are
desired that may impact running ILM actions. For this reason, ILM has two ways
to disable operations.
+When stopping ILM, snapshot lifecycle management operations are also stopped;
+this means that no scheduled snapshots are created (currently ongoing snapshots
+are unaffected).
+
Normally, ILM will be running by default.
To see the current operating status of ILM, use the <>
to see the current state of ILM.
diff --git a/docs/reference/ilm/update-lifecycle-policy.asciidoc b/docs/reference/ilm/update-lifecycle-policy.asciidoc
index bc41da6bdff63..5a0034a2a60ba 100644
--- a/docs/reference/ilm/update-lifecycle-policy.asciidoc
+++ b/docs/reference/ilm/update-lifecycle-policy.asciidoc
@@ -198,6 +198,7 @@ GET my_index/_ilm/explain
   "managed": true,
   "policy": "my_executing_policy",
   "lifecycle_date_millis": 1538475653281,
+  "age": "30s",
   "phase": "hot",
   "phase_time_millis": 1538475653317,
   "action": "rollover",
@@ -275,6 +276,7 @@ GET my_index/_ilm/explain
   "managed": true,
   "policy": "my_executing_policy",
   "lifecycle_date_millis": 1538475653281,
+  "age": "30s",
   "phase": "hot",
   "phase_time_millis": 1538475653317,
   "action": "rollover",
@@ -354,6 +356,7 @@ GET my_index/_ilm/explain
   "managed": true,
   "policy": "my_executing_policy",
   "lifecycle_date_millis": 1538475653281,
+  "age": "30s",
   "phase": "hot",
   "phase_time_millis": 1538475653317,
   "action": "rollover",
@@ -408,6 +411,7 @@ GET my_index/_ilm/explain
   "managed": true,
   "policy": "my_executing_policy",
   "lifecycle_date_millis": 1538475653281,
+  "age": "30s",
   "phase": "warm",
   "phase_time_millis": 1538475653317,
   "action": "forcemerge",
diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc
index e34a4483ab6e9..aa95bb8fa2e33 100644
--- a/docs/reference/index-modules.asciidoc
+++ b/docs/reference/index-modules.asciidoc
@@ -124,7 +124,7 @@ specific index module:
  The maximum value of `from + size` for searches to this index. Defaults to
  `10000`. Search requests take heap memory and time proportional to
  `from + size` and this limits that memory. See
- <> or <> for a more efficient alternative
+ <> or <> for a more efficient alternative
  to raising this.
`index.max_inner_result_window`::
diff --git a/docs/reference/index-modules/allocation/delayed.asciidoc b/docs/reference/index-modules/allocation/delayed.asciidoc
index 907d927fd68dc..c6626a9dec323 100644
--- a/docs/reference/index-modules/allocation/delayed.asciidoc
+++ b/docs/reference/index-modules/allocation/delayed.asciidoc
@@ -28,7 +28,7 @@ this scenario:
If the master had just waited for a few minutes, then the missing shards could
have been re-allocated to Node 5 with the minimum of network traffic. This
process would be even quicker for idle shards (shards not receiving indexing
-requests) which have been automatically <>.
+requests) which have been automatically <>.
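
For example, a minimal sketch of how such a delay can be configured cluster-wide
(this uses the `index.unassigned.node_left.delayed_timeout` setting introduced
just below; the five-minute value is illustrative):

[source,js]
--------------------------------------------------
PUT /_all/_settings
{
  "settings": {
    "index.unassigned.node_left.delayed_timeout": "5m"
  }
}
--------------------------------------------------
// CONSOLE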
The allocation of replica shards which become unassigned because a node has left can be delayed with the `index.unassigned.node_left.delayed_timeout` diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index f9d4b5a4e0528..c69af78dba184 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -22,23 +22,10 @@ include::{xes-repo-dir}/security/configuring-es.asciidoc[] include::setup/bootstrap-checks-xes.asciidoc[] -:edit_url: include::upgrade.asciidoc[] -include::api-conventions.asciidoc[] - -include::docs.asciidoc[] - -include::search.asciidoc[] - include::aggregations.asciidoc[] -include::indices.asciidoc[] - -include::cat.asciidoc[] - -include::cluster.asciidoc[] - include::query-dsl.asciidoc[] include::scripting.asciidoc[] @@ -67,7 +54,6 @@ include::administering.asciidoc[] include::commands/index.asciidoc[] -:edit_url: include::how-to.asciidoc[] include::testing.asciidoc[] diff --git a/docs/reference/indices.asciidoc b/docs/reference/indices.asciidoc index cda7c41cb42d1..2a4cff93ba759 100644 --- a/docs/reference/indices.asciidoc +++ b/docs/reference/indices.asciidoc @@ -1,14 +1,12 @@ [[indices]] -= Indices APIs +== Index APIs -[partintro] --- -The indices APIs are used to manage individual indices, +Index APIs are used to manage individual indices, index settings, aliases, mappings, and index templates. [float] [[index-management]] -== Index management: +=== Index management: * <> * <> @@ -18,10 +16,13 @@ index settings, aliases, mappings, and index templates. * <> * <> * <> +* <> +* <> + [float] [[mapping-management]] -== Mapping management: +=== Mapping management: * <> * <> @@ -30,12 +31,12 @@ index settings, aliases, mappings, and index templates. [float] [[alias-management]] -== Alias management: +=== Alias management: * <> [float] [[index-settings]] -== Index settings: +=== Index settings: * <> * <> * <> @@ -43,7 +44,7 @@ index settings, aliases, mappings, and index templates. [float] [[monitoring]] -== Monitoring: +=== Monitoring: * <> * <> * <> @@ -51,14 +52,12 @@ index settings, aliases, mappings, and index templates. [float] [[status-management]] -== Status management: +=== Status management: * <> * <> * <> * <> --- - include::indices/create-index.asciidoc[] include::indices/delete-index.asciidoc[] @@ -75,6 +74,10 @@ include::indices/split-index.asciidoc[] include::indices/rollover-index.asciidoc[] +include::indices/apis/freeze.asciidoc[] + +include::indices/apis/unfreeze.asciidoc[] + include::indices/put-mapping.asciidoc[] include::indices/get-mapping.asciidoc[] diff --git a/docs/reference/indices/aliases.asciidoc b/docs/reference/indices/aliases.asciidoc index 408cd0c5484cf..03fa18bc8a448 100644 --- a/docs/reference/indices/aliases.asciidoc +++ b/docs/reference/indices/aliases.asciidoc @@ -1,5 +1,5 @@ [[indices-aliases]] -== Index Aliases +=== Index Aliases APIs in Elasticsearch accept an index name when working against a specific index, and several indices when applicable. The index aliases @@ -130,7 +130,7 @@ POST /_aliases [float] [[filtered]] -=== Filtered Aliases +==== Filtered Aliases Aliases with filters provide an easy way to create different "views" of the same index. The filter can be defined using Query DSL and is applied @@ -177,7 +177,7 @@ POST /_aliases [float] [[aliases-routing]] -==== Routing +===== Routing It is possible to associate routing values with aliases. 
This feature can be used together with filtering aliases in order to avoid @@ -244,7 +244,7 @@ GET /alias2/_search?q=user:kimchy&routing=2,3 [float] [[aliases-write-index]] -==== Write Index +===== Write Index It is possible to associate the index pointed to by an alias as the write index. When specified, all index and update requests against an alias that point to multiple @@ -342,7 +342,7 @@ writes will be rejected. [float] [[alias-adding]] -=== Add a single alias +==== Add a single alias An alias can also be added with the endpoint @@ -360,7 +360,7 @@ where You can also use the plural `_aliases`. [float] -==== Examples: +===== Examples: Adding time based alias:: + @@ -412,7 +412,7 @@ PUT /users/_alias/user_12 [float] [[alias-index-creation]] -=== Aliases during index creation +==== Aliases during index creation Aliases can also be specified during <>: @@ -439,7 +439,7 @@ PUT /logs_20162801 [float] [[deleting]] -=== Delete aliases +==== Delete aliases The rest endpoint is: `/{index}/_alias/{name}` @@ -461,7 +461,7 @@ DELETE /logs_20162801/_alias/current_day [float] [[alias-retrieving]] -=== Retrieving existing aliases +==== Retrieving existing aliases The get index alias API allows to filter by alias name and index name. This api redirects to the master and fetches @@ -487,7 +487,7 @@ Possible options: The rest endpoint is: `/{index}/_alias/{alias}`. [float] -==== Examples: +===== Examples: All aliases for the index `logs_20162801`: diff --git a/docs/reference/indices/analyze.asciidoc b/docs/reference/indices/analyze.asciidoc index a0d0f3c5b2449..50dc88f3711d2 100644 --- a/docs/reference/indices/analyze.asciidoc +++ b/docs/reference/indices/analyze.asciidoc @@ -1,5 +1,5 @@ [[indices-analyze]] -== Analyze +=== Analyze Performs the analysis process on a text and return the tokens breakdown of the text. @@ -139,7 +139,8 @@ GET _analyze -------------------------------------------------- // CONSOLE -=== Explain Analyze +[[explain-analyze-api]] +==== Explain Analyze If you want to get more advanced details, set `explain` to `true` (defaults to `false`). It will output all token attributes for each token. You can filter token attributes you want to output by setting `attributes` option. @@ -210,7 +211,7 @@ The request returns the following result: [[tokens-limit-settings]] [float] -== Settings to prevent tokens explosion +=== Settings to prevent tokens explosion Generating excessive amount of tokens may cause a node to run out of memory. The following setting allows to limit the number of tokens that can be produced: diff --git a/docs/reference/indices/apis/freeze.asciidoc b/docs/reference/indices/apis/freeze.asciidoc index ccf35f41d09aa..b2f96725ba226 100644 --- a/docs/reference/indices/apis/freeze.asciidoc +++ b/docs/reference/indices/apis/freeze.asciidoc @@ -31,8 +31,8 @@ limitation might be removed in the future. [[freeze-index-api-path-parms]] ==== {api-path-parms-title} -`` (Required):: -(string) Identifier for the index. +``:: + (Required, string) Identifier for the index. [[freeze-index-api-examples]] ==== {api-examples-title} diff --git a/docs/reference/indices/apis/index.asciidoc b/docs/reference/indices/apis/index.asciidoc deleted file mode 100644 index 4506042706274..0000000000000 --- a/docs/reference/indices/apis/index.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -[[index-apis]] -== Index APIs - -The index APIs are used to manage individual indices. - -* <>, <> - -See also <>. 
- -include::freeze.asciidoc[] -include::unfreeze.asciidoc[] \ No newline at end of file diff --git a/docs/reference/indices/apis/unfreeze.asciidoc b/docs/reference/indices/apis/unfreeze.asciidoc index 2c810e761bc7d..fa70259580c9d 100644 --- a/docs/reference/indices/apis/unfreeze.asciidoc +++ b/docs/reference/indices/apis/unfreeze.asciidoc @@ -30,8 +30,8 @@ limitation might be removed in the future. [[unfreeze-index-api-path-parms]] ==== {api-path-parms-title} -`` (Required):: -(string) Identifier for the index. +``:: + (Required, string) Identifier for the index. [[unfreeze-index-api-examples]] ==== {api-examples-title} diff --git a/docs/reference/indices/clearcache.asciidoc b/docs/reference/indices/clearcache.asciidoc index 6f9c06bd729b6..b09d993a12787 100644 --- a/docs/reference/indices/clearcache.asciidoc +++ b/docs/reference/indices/clearcache.asciidoc @@ -1,5 +1,5 @@ [[indices-clearcache]] -== Clear Cache +=== Clear Cache The clear cache API allows to clear either all caches or specific cached associated with one or more indices. @@ -40,7 +40,7 @@ POST /twitter/_cache/clear?fields=foo,bar <1> <1> Clear the cache for the `foo` an `bar` field [float] -=== Multi Index +==== Multi Index The clear cache API can be applied to more than one index with a single call, or even on `_all` the indices. diff --git a/docs/reference/indices/create-index.asciidoc b/docs/reference/indices/create-index.asciidoc index 3038265891458..f90ba08f939a1 100644 --- a/docs/reference/indices/create-index.asciidoc +++ b/docs/reference/indices/create-index.asciidoc @@ -1,5 +1,5 @@ [[indices-create-index]] -== Create Index +=== Create Index The Create Index API is used to manually create an index in Elasticsearch. All documents in Elasticsearch are stored inside of one index or another. @@ -30,7 +30,7 @@ There are several limitations to what you can name your index. The complete lis [float] [[create-index-settings]] -=== Index Settings +==== Index Settings Each index created can have specific settings associated with it, defined in the body: @@ -76,7 +76,7 @@ that can be set when creating an index, please check the [float] [[mappings]] -=== Mappings +==== Mappings The create index API allows for providing a mapping definition: @@ -102,7 +102,7 @@ include_type_name is set. For more details, please see <>. [float] [[create-index-aliases]] -=== Aliases +==== Aliases The create index API allows also to provide a set of <>: @@ -125,7 +125,7 @@ PUT test [float] [[create-index-wait-for-active-shards]] -=== Wait For Active Shards +==== Wait For Active Shards By default, index creation will only return a response to the client when the primary copies of each shard have been started, or the request times out. The index creation response will indicate diff --git a/docs/reference/indices/delete-index.asciidoc b/docs/reference/indices/delete-index.asciidoc index 1d12e0f88c507..6c95ae21f07cf 100644 --- a/docs/reference/indices/delete-index.asciidoc +++ b/docs/reference/indices/delete-index.asciidoc @@ -1,5 +1,5 @@ [[indices-delete-index]] -== Delete Index +=== Delete Index The delete index API allows to delete an existing index. diff --git a/docs/reference/indices/flush.asciidoc b/docs/reference/indices/flush.asciidoc index ea433fa9cbd13..29a5b6d4d28a4 100644 --- a/docs/reference/indices/flush.asciidoc +++ b/docs/reference/indices/flush.asciidoc @@ -1,5 +1,5 @@ [[indices-flush]] -== Flush +=== Flush The flush API allows to flush one or more indices through an API. 
The flush process of an index makes sure that any data that is currently only
@@ -18,7 +18,7 @@ POST twitter/_flush
[float]
[[flush-parameters]]
-=== Request Parameters
+==== Request Parameters
The flush API accepts the following request parameters:
@@ -33,7 +33,7 @@ should be incremented even if no uncommitted changes are present.
[float]
[[flush-multi-index]]
-=== Multi Index
+==== Multi Index
The flush API can be applied to more than one index with a single call, or even
on `_all` the indices.
@@ -47,8 +47,8 @@ POST _flush
// CONSOLE
// TEST[s/^/PUT kimchy\nPUT elasticsearch\n/]
-[[indices-synced-flush]]
-=== Synced Flush
+[[synced-flush-api]]
+==== Synced Flush
Elasticsearch tracks the indexing activity of each shard. Shards that have not
received any indexing operations for 5 minutes are automatically marked as inactive. This presents
@@ -119,7 +119,7 @@ which returns something similar to:
<1> the `sync id` marker
[float]
-=== Synced Flush API
+==== Synced Flush API
The Synced Flush API allows an administrator to initiate a synced flush manually. This can be particularly
useful for a planned (rolling) cluster restart where you can stop indexing and don't want to wait the default 5 minutes for
diff --git a/docs/reference/indices/forcemerge.asciidoc b/docs/reference/indices/forcemerge.asciidoc
index a28d5eaa8586c..f478b5743e232 100644
--- a/docs/reference/indices/forcemerge.asciidoc
+++ b/docs/reference/indices/forcemerge.asciidoc
@@ -1,19 +1,24 @@
[[indices-forcemerge]]
-== Force Merge
+=== Force Merge
-The force merge API allows to force merging of one or more indices through an
-API. The merge relates to the number of segments a Lucene index holds within
-each shard. The force merge operation allows to reduce the number of segments by
-merging them.
+The force merge API allows you to force a <> on the
+shards of one or more indices. Merging reduces the number of segments in each
+shard by merging some of them together, and also frees up the space used by
+deleted documents. Merging normally happens automatically, but sometimes it is
+useful to trigger a merge manually.
-This call will block until the merge is complete. If the http connection is
-lost, the request will continue in the background, and any new requests will
-block until the previous force merge is complete.
+WARNING: **Force merge should only be called against an index after you have
+finished writing to it.** Force merge can cause very large (>5GB) segments to
+be produced, and if you continue to write to such an index then the automatic
+merge policy will never consider these segments for future merges until they
+mostly consist of deleted documents. This can cause very large segments to
+remain in the index, which can result in increased disk usage and worse search
+performance.
-WARNING: Force merge should only be called against *read-only indices*. Running
-force merge against a read-write index can cause very large segments to be produced
-(>5Gb per segment), and the merge policy will never consider it for merging again until
-it mostly consists of deleted docs. This can cause very large segments to remain in the shards.
+Calls to this API block until the merge is complete. If the client connection
+is lost before completion then the force merge process will continue in the
+background. Any new requests to force merge the same indices will also block
+until the ongoing force merge is complete.
[source,js]
--------------------------------------------------
@@ -22,9 +27,25 @@
POST /twitter/_forcemerge
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]
+Force-merging can be useful with time-based indices and when using
+<>. In these cases each index only receives
+indexing traffic for a certain period of time, and once an index receives no
+more writes, its shards can be force-merged down to a single segment:
+
+[source,js]
+--------------------------------------------------
+POST /logs-000001/_forcemerge?max_num_segments=1
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
+// TEST[s/logs-000001/twitter/]
+
+This can be a good idea because single-segment shards can sometimes use simpler
+and more efficient data structures to perform searches.
+
[float]
[[forcemerge-parameters]]
-=== Request Parameters
+==== Request Parameters
The force merge API accepts the following request parameters:
@@ -52,7 +73,7 @@ POST /kimchy/_forcemerge?only_expunge_deletes=false&max_num_segments=100&flush=t
[float]
[[forcemerge-multi-index]]
-=== Multi Index
+==== Multi Index
The force merge API can be applied to more than one index with a single call, or
even on `_all` the indices. Multi index operations are executed one shard at a
diff --git a/docs/reference/indices/get-field-mapping.asciidoc b/docs/reference/indices/get-field-mapping.asciidoc
index 13e80d5cc860e..2223231436624 100644
--- a/docs/reference/indices/get-field-mapping.asciidoc
+++ b/docs/reference/indices/get-field-mapping.asciidoc
@@ -1,5 +1,5 @@
[[indices-get-field-mapping]]
-== Get Field Mapping
+=== Get Field Mapping
The get field mapping API allows you to retrieve mapping definitions for one or more
fields. This is useful when you do not need the complete type mapping returned by
@@ -59,7 +59,7 @@ For which the response is:
// TESTRESPONSE
[float]
-=== Multiple Indices and Fields
+==== Multiple Indices and Fields
The get field mapping API can be used to get the mapping of multiple fields from more
than one index with a single call. General usage of the API follows the
@@ -81,7 +81,7 @@ GET /_all/_mapping/field/*.id
// TEST[s/^/PUT kimchy\nPUT book\n/]
[float]
-=== Specifying fields
+==== Specifying fields
The get mapping api allows you to specify a comma-separated list of fields.
@@ -168,7 +168,7 @@ returns:
// TESTRESPONSE
[float]
-=== Other options
+==== Other options
[horizontal]
`include_defaults`::
diff --git a/docs/reference/indices/get-index.asciidoc b/docs/reference/indices/get-index.asciidoc
index 24abc33a6929a..561362f27ff4f 100644
--- a/docs/reference/indices/get-index.asciidoc
+++ b/docs/reference/indices/get-index.asciidoc
@@ -1,5 +1,5 @@
[[indices-get-index]]
-== Get Index
+=== Get Index
The get index API allows to retrieve information about one or more indexes.
diff --git a/docs/reference/indices/get-mapping.asciidoc b/docs/reference/indices/get-mapping.asciidoc
index fbd4a860b31e8..69d4bb622336d 100644
--- a/docs/reference/indices/get-mapping.asciidoc
+++ b/docs/reference/indices/get-mapping.asciidoc
@@ -1,5 +1,5 @@
[[indices-get-mapping]]
-== Get Mapping
+=== Get Mapping
The get mapping API allows to retrieve mapping definitions for an index or
index/type.
@@ -16,7 +16,7 @@ in responses no longer contain a type name by default, you can still request the
through the parameter include_type_name. For more details, please see <>.
[float]
-=== Multiple Indices
+==== Multiple Indices
The get mapping API can be used to get more than one index with a single call.
General usage of the API follows the following syntax: diff --git a/docs/reference/indices/get-settings.asciidoc b/docs/reference/indices/get-settings.asciidoc index fe539b5cad0b3..7be8760232b59 100644 --- a/docs/reference/indices/get-settings.asciidoc +++ b/docs/reference/indices/get-settings.asciidoc @@ -1,5 +1,5 @@ [[indices-get-settings]] -== Get Settings +=== Get Settings The get settings API allows to retrieve settings of index/indices: @@ -11,7 +11,7 @@ GET /twitter/_settings // TEST[setup:twitter] [float] -=== Multiple Indices and Types +==== Multiple Indices and Types The get settings API can be used to get settings for more than one index with a single call. General usage of the API follows the @@ -33,7 +33,7 @@ GET /log_2013_*/_settings // TEST[s/^/PUT kimchy\nPUT log_2013_01_01\n/] [float] -=== Filtering settings by name +==== Filtering settings by name The settings that are returned can be filtered with wildcard matching as follows: diff --git a/docs/reference/indices/indices-exists.asciidoc b/docs/reference/indices/indices-exists.asciidoc index 29bf2847b0281..4a1b9c36cac9c 100644 --- a/docs/reference/indices/indices-exists.asciidoc +++ b/docs/reference/indices/indices-exists.asciidoc @@ -1,5 +1,5 @@ [[indices-exists]] -== Indices Exists +=== Indices Exists Used to check if the index (indices) exists or not. For example: diff --git a/docs/reference/indices/open-close.asciidoc b/docs/reference/indices/open-close.asciidoc index 4ba434ecbbb6e..efbd289758a6b 100644 --- a/docs/reference/indices/open-close.asciidoc +++ b/docs/reference/indices/open-close.asciidoc @@ -1,5 +1,5 @@ [[indices-open-close]] -== Open / Close Index API +=== Open / Close Index API The open and close index APIs allow to close an index, and later on opening it. @@ -80,7 +80,7 @@ Closed indices consume a significant amount of disk-space which can cause proble API by setting `cluster.indices.close.enable` to `false`. The default is `true`. [float] -=== Wait For Active Shards +==== Wait For Active Shards Because opening or closing an index allocates its shards, the <> setting on diff --git a/docs/reference/indices/put-mapping.asciidoc b/docs/reference/indices/put-mapping.asciidoc index bdf899c6c8180..d5d73f8fc3f23 100644 --- a/docs/reference/indices/put-mapping.asciidoc +++ b/docs/reference/indices/put-mapping.asciidoc @@ -1,5 +1,5 @@ [[indices-put-mapping]] -== Put Mapping +=== Put Mapping The PUT mapping API allows you to add fields to an existing index or to change search only settings of existing fields. @@ -28,7 +28,7 @@ types in requests is now deprecated, a type can still be provided if the request include_type_name is set. For more details, please see <>. [float] -=== Multi-index +==== Multi-index The PUT mapping API can be applied to multiple indices with a single request. For example, we can update the `twitter-1` and `twitter-2` mappings at the same time: @@ -55,7 +55,7 @@ PUT /twitter-1,twitter-2/_mapping <1> [[updating-field-mappings]] [float] -=== Updating field mappings +==== Updating field mappings In general, the mapping for existing fields cannot be updated. There are some exceptions to this rule. 
For instance: diff --git a/docs/reference/indices/recovery.asciidoc b/docs/reference/indices/recovery.asciidoc index 6e03ddd16b71b..d6f7fafed2c5a 100644 --- a/docs/reference/indices/recovery.asciidoc +++ b/docs/reference/indices/recovery.asciidoc @@ -1,5 +1,5 @@ [[indices-recovery]] -== Indices Recovery +=== Indices Recovery The indices recovery API provides insight into on-going index shard recoveries. Recovery status may be reported for specific indices, or cluster-wide. diff --git a/docs/reference/indices/refresh.asciidoc b/docs/reference/indices/refresh.asciidoc index 1e27ace362532..743ea8c19e6b1 100644 --- a/docs/reference/indices/refresh.asciidoc +++ b/docs/reference/indices/refresh.asciidoc @@ -1,5 +1,5 @@ [[indices-refresh]] -== Refresh +=== Refresh The refresh API allows to explicitly refresh one or more index, making all operations performed since the last refresh available for search. @@ -15,7 +15,7 @@ POST /twitter/_refresh // TEST[setup:twitter] [float] -=== Multi Index +==== Multi Index The refresh API can be applied to more than one index with a single call, or even on `_all` the indices. diff --git a/docs/reference/indices/rollover-index.asciidoc b/docs/reference/indices/rollover-index.asciidoc index b5037d0a94233..17336fbc45855 100644 --- a/docs/reference/indices/rollover-index.asciidoc +++ b/docs/reference/indices/rollover-index.asciidoc @@ -1,5 +1,5 @@ [[indices-rollover-index]] -== Rollover Index +=== Rollover Index The rollover index API rolls an <> to a new index when the existing index meets a condition you provide. You can use this API to retire @@ -88,7 +88,7 @@ The above request might return the following response: <3> The result of each condition. [float] -=== Naming the new index +==== Naming the new index If the name of the existing index ends with `-` and a number -- e.g. `logs-000001` -- then the name of the new index will follow the same pattern, @@ -113,7 +113,7 @@ POST /my_alias/_rollover/my_new_index_name // TEST[s/^/PUT my_old_index_name\nPUT my_old_index_name\/_alias\/my_alias\n/] [float] -=== Using date math with the rollover API +==== Using date math with the rollover API It can be useful to use <> to name the rollover index according to the date that the index rolled over, e.g. @@ -193,7 +193,7 @@ GET /%3Clogs-%7Bnow%2Fd%7D-*%3E%2C%3Clogs-%7Bnow%2Fd-1d%7D-*%3E%2C%3Clogs-%7Bnow // TEST[s/now/2016.10.31||/] [float] -=== Defining the new index +==== Defining the new index The settings, mappings, and aliases for the new index are taken from any matching <>. Additionally, you can specify @@ -226,7 +226,7 @@ POST /logs_write/_rollover // CONSOLE [float] -=== Dry run +==== Dry run The rollover API supports `dry_run` mode, where request conditions can be checked without performing the actual rollover: @@ -252,7 +252,7 @@ POST /logs_write/_rollover?dry_run // CONSOLE [float] -=== Wait For Active Shards +==== Wait For Active Shards Because the rollover operation creates a new index to rollover to, the <> setting on @@ -260,7 +260,7 @@ index creation applies to the rollover action as well. [[indices-rollover-is-write-index]] [float] -=== Write Index Alias Behavior +==== Write Index Alias Behavior The rollover alias when rolling over a write index that has `is_write_index` explicitly set to `true` is not swapped during rollover actions. 
Since having an alias point to multiple indices is ambiguous in distinguishing diff --git a/docs/reference/indices/segments.asciidoc b/docs/reference/indices/segments.asciidoc index 614bd8852b7ac..bc204a0a4a577 100644 --- a/docs/reference/indices/segments.asciidoc +++ b/docs/reference/indices/segments.asciidoc @@ -1,5 +1,5 @@ [[indices-segments]] -== Indices Segments +=== Indices Segments Provide low level segments information that a Lucene index (shard level) is built with. Allows to be used to provide more information on the @@ -119,7 +119,7 @@ compound:: Whether the segment is stored in a compound file. When true, this attributes:: Contains information about whether high compression was enabled [float] -=== Verbose mode +==== Verbose mode To add additional information that can be used for debugging, use the `verbose` flag. diff --git a/docs/reference/indices/shard-stores.asciidoc b/docs/reference/indices/shard-stores.asciidoc index 4788e66768f21..98697149adc1f 100644 --- a/docs/reference/indices/shard-stores.asciidoc +++ b/docs/reference/indices/shard-stores.asciidoc @@ -1,5 +1,5 @@ [[indices-shards-stores]] -== Indices Shard Stores +=== Indices Shard Stores Provides store information for shard copies of indices. Store information reports on which nodes shard copies exist, the shard diff --git a/docs/reference/indices/shrink-index.asciidoc b/docs/reference/indices/shrink-index.asciidoc index aa73d3d1350bb..283bf59080c72 100644 --- a/docs/reference/indices/shrink-index.asciidoc +++ b/docs/reference/indices/shrink-index.asciidoc @@ -1,5 +1,5 @@ [[indices-shrink-index]] -== Shrink Index +=== Shrink Index The shrink index API allows you to shrink an existing index into a new index with fewer primary shards. The requested number of primary shards in the target index @@ -26,7 +26,7 @@ Shrinking works as follows: had just been re-opened. [float] -=== Preparing an index for shrinking +==== Preparing an index for shrinking In order to shrink an index, the index must be marked as read-only, and a (primary or replica) copy of every shard in the index must be relocated to the @@ -58,7 +58,7 @@ with the <>, or the <>, or the <> can be used to wait @@ -153,7 +153,7 @@ become `active`. At that point, Elasticsearch will try to allocate any replicas and may decide to relocate the primary shard to another node. [float] -=== Wait For Active Shards +==== Wait For Active Shards Because the shrink operation creates a new index to shrink the shards to, the <> setting diff --git a/docs/reference/indices/split-index.asciidoc b/docs/reference/indices/split-index.asciidoc index 4b9aaa09a94b6..dd7b1213cb39f 100644 --- a/docs/reference/indices/split-index.asciidoc +++ b/docs/reference/indices/split-index.asciidoc @@ -1,5 +1,5 @@ [[indices-split-index]] -== Split Index +=== Split Index The split index API allows you to split an existing index into a new index, where each original primary shard is split into two or more primary shards in @@ -23,7 +23,7 @@ shards in the original index. The default is designed to allow you to split by factors of 2 up to a maximum of 1024 shards. However, the original number of primary shards must taken into account. For instance, an index created with 5 primary shards could be split into 10, 20, 40, 80, 160, 320, or a -maximum of 740 shards (with a single split action or multiple split actions). +maximum of 640 shards (with a single split action or multiple split actions). 
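
For instance, a sketch of one such split under these defaults, taking a
five-shard index straight to ten shards (`my_source_index` and
`my_target_index` are the illustrative names used later on this page; the
preparation steps required before splitting are described below):

[source,js]
--------------------------------------------------
POST /my_source_index/_split/my_target_index
{
  "settings": {
    "index.number_of_shards": 10
  }
}
--------------------------------------------------
// CONSOLE
// TEST[skip:illustrative sketch only]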
If the original index contains one primary shard (or a multi-shard index has been <> down to a single primary shard), then the @@ -32,7 +32,7 @@ properties of the default number of routing shards will then apply to the newly split index. [float] -=== How does splitting work? +==== How does splitting work? Splitting works as follows: @@ -51,7 +51,7 @@ Splitting works as follows: [float] [[incremental-resharding]] -=== Why doesn't Elasticsearch support incremental resharding? +==== Why doesn't Elasticsearch support incremental resharding? Going from `N` shards to `N+1` shards, aka. incremental resharding, is indeed a feature that is supported by many key-value stores. Adding a new shard and @@ -81,7 +81,7 @@ old and new indices have respectively +M+ and +N+ shards, this has no overhead compared to searching an index that would have +M+N+ shards. [float] -=== Preparing an index for splitting +==== Preparing an index for splitting Create a new index: @@ -117,7 +117,7 @@ PUT /my_source_index/_settings changes like deleting the index. [float] -=== Splitting an index +==== Splitting an index To split `my_source_index` into a new index called `my_target_index`, issue the following request: @@ -179,7 +179,7 @@ POST my_source_index/_split/my_target_index NOTE: Mappings may not be specified in the `_split` request. [float] -=== Monitoring the split process +==== Monitoring the split process The split process can be monitored with the <>, or the <> can be used to wait @@ -198,7 +198,7 @@ become `active`. At that point, Elasticsearch will try to allocate any replicas and may decide to relocate the primary shard to another node. [float] -=== Wait For Active Shards +==== Wait For Active Shards Because the split operation creates a new index to split the shards to, the <> setting diff --git a/docs/reference/indices/stats.asciidoc b/docs/reference/indices/stats.asciidoc index 9ccd78f8f5df2..c85d63c8e579e 100644 --- a/docs/reference/indices/stats.asciidoc +++ b/docs/reference/indices/stats.asciidoc @@ -1,5 +1,5 @@ [[indices-stats]] -== Indices Stats +=== Indices Stats Indices level stats provide statistics on different operations happening on an index. The API provides statistics on the index level scope diff --git a/docs/reference/indices/templates.asciidoc b/docs/reference/indices/templates.asciidoc index c9ac3e61c29b5..60e65c3139454 100644 --- a/docs/reference/indices/templates.asciidoc +++ b/docs/reference/indices/templates.asciidoc @@ -1,5 +1,5 @@ [[indices-templates]] -== Index Templates +=== Index Templates Index templates allow you to define templates that will automatically be applied when new indices are created. The templates include both @@ -78,7 +78,7 @@ actual index name that the template gets applied to, during index creation. [float] [[delete]] -=== Deleting a Template +==== Deleting a Template Index templates are identified by a name (in the above case `template_1`) and can be deleted as well: @@ -91,7 +91,7 @@ DELETE /_template/template_1 [float] [[getting]] -=== Getting templates +==== Getting templates Index templates are identified by a name (in the above case `template_1`) and can be retrieved using the following: @@ -121,7 +121,7 @@ GET /_template [float] [[indices-templates-exists]] -=== Template exists +==== Template exists Used to check if the template exists or not. For example: @@ -141,7 +141,7 @@ the parameter include_type_name. For more details, please see <>.] 
diff --git a/docs/reference/indices/update-settings.asciidoc b/docs/reference/indices/update-settings.asciidoc index 227ff1a73d7a0..9962182edadcb 100644 --- a/docs/reference/indices/update-settings.asciidoc +++ b/docs/reference/indices/update-settings.asciidoc @@ -1,5 +1,5 @@ [[indices-update-settings]] -== Update Indices Settings +=== Update Indices Settings Change specific index level settings in real time. @@ -40,7 +40,7 @@ request parameter can be set to `true`. [float] [[bulk]] -=== Bulk Indexing Usage +==== Bulk Indexing Usage For example, the update settings API can be used to dynamically change the index from being more performant for bulk indexing, and then move it @@ -88,7 +88,7 @@ POST /twitter/_forcemerge?max_num_segments=5 [float] [[update-settings-analysis]] -=== Updating Index Analysis +==== Updating Index Analysis It is also possible to define new <> for the index. But it is required to <> the index diff --git a/docs/reference/mapping.asciidoc b/docs/reference/mapping.asciidoc index d0a3c6e06cd66..8b6b1af3e5895 100644 --- a/docs/reference/mapping.asciidoc +++ b/docs/reference/mapping.asciidoc @@ -47,7 +47,7 @@ Each field has a data `type` which can be: * a type which supports the hierarchical nature of JSON such as <> or <>. * or a specialised type like <>, - <>, or <>. + <>, or <>. It is often useful to index the same field in different ways for different purposes. For instance, a `string` field could be <> as diff --git a/docs/reference/mapping/fields/id-field.asciidoc b/docs/reference/mapping/fields/id-field.asciidoc index 0f4ed15196962..5ccf1cc8ec75c 100644 --- a/docs/reference/mapping/fields/id-field.asciidoc +++ b/docs/reference/mapping/fields/id-field.asciidoc @@ -39,3 +39,9 @@ but doing so is discouraged as it requires to load a lot of data in memory. In case sorting or aggregating on the `_id` field is required, it is advised to duplicate the content of the `_id` field in another field that has `doc_values` enabled. + + +[NOTE] +================================================== +`_id` is limited to 512 bytes in size and larger values will be rejected. +================================================== diff --git a/docs/reference/mapping/fields/source-field.asciidoc b/docs/reference/mapping/fields/source-field.asciidoc index c9fd2cf186909..78cef20ea81b7 100644 --- a/docs/reference/mapping/fields/source-field.asciidoc +++ b/docs/reference/mapping/fields/source-field.asciidoc @@ -36,7 +36,7 @@ available then a number of features are not supported: * The <>, <>, and <> APIs. -* On the fly <>. +* On the fly <>. * The ability to reindex from one Elasticsearch index to another, either to change mappings or analysis, or to upgrade an index to a new major @@ -77,7 +77,7 @@ stored. WARNING: Removing fields from the `_source` has similar downsides to disabling `_source`, especially the fact that you cannot reindex documents from one Elasticsearch index to another. Consider using -<> instead. +<> instead. The `includes`/`excludes` parameters (which also accept wildcards) can be used as follows: diff --git a/docs/reference/mapping/params/eager-global-ordinals.asciidoc b/docs/reference/mapping/params/eager-global-ordinals.asciidoc index 8973be951129c..162049ec1323f 100644 --- a/docs/reference/mapping/params/eager-global-ordinals.asciidoc +++ b/docs/reference/mapping/params/eager-global-ordinals.asciidoc @@ -30,7 +30,7 @@ efficiently compressed. By default, global ordinals are loaded at search-time, which is the right trade-off if you are optimizing for indexing speed. 
However, if you are more -interested in search speed, it could be interesting to set +interested in search speed, it could be beneficial to set `eager_global_ordinals: true` on fields that you plan to use in terms aggregations: @@ -49,9 +49,25 @@ PUT my_index/_mapping // CONSOLE // TEST[s/^/PUT my_index\n/] -This will shift the cost from search-time to refresh-time. Elasticsearch will -make sure that global ordinals are built before publishing updates to the -content of the index. +This will shift the cost of building the global ordinals from search-time to +refresh-time. Elasticsearch will make sure that global ordinals are built +before exposing to searches any changes to the content of the index. +Elasticsearch will also eagerly build global ordinals when starting a new copy +of a shard, such as when increasing the number of replicas or when relocating a +shard onto a new node. + +If a shard has been <> down to a single +segment then its global ordinals are identical to the ordinals for its unique +segment, which means there is no extra cost for using global ordinals on such a +shard. Note that for performance reasons you should only force-merge an index +to which you will never write again. + +On a <>, global ordinals are discarded after each +search and rebuilt again on the next search if needed or if +`eager_global_ordinals` is set. This means `eager_global_ordinals` should not +be used on frozen indices. Instead, force-merge an index to a single segment +before freezing it so that global ordinals need not be built separately on each +search. If you ever decide that you do not need to run `terms` aggregations on this field anymore, then you can disable eager loading of global ordinals at any diff --git a/docs/reference/mapping/params/store.asciidoc b/docs/reference/mapping/params/store.asciidoc index d3ebe13d4ad62..7316499b6a42d 100644 --- a/docs/reference/mapping/params/store.asciidoc +++ b/docs/reference/mapping/params/store.asciidoc @@ -9,7 +9,7 @@ Usually this doesn't matter. The field value is already part of the <>, which is stored by default. If you only want to retrieve the value of a single field or of a few fields, instead of the whole `_source`, then this can be achieved with -<>. +<>. In certain situations it can make sense to `store` a field. For instance, if you have a document with a `title`, a `date`, and a very large `content` diff --git a/docs/reference/mapping/params/term-vector.asciidoc b/docs/reference/mapping/params/term-vector.asciidoc index ff05539522efc..7a97955c1734f 100644 --- a/docs/reference/mapping/params/term-vector.asciidoc +++ b/docs/reference/mapping/params/term-vector.asciidoc @@ -8,6 +8,8 @@ Term vectors contain information about the terms produced by the * the position (or order) of each term. * the start and end character offsets mapping the term to its origin in the original string. +* payloads (if they are available) — user-defined binary data + associated with each term position. These term vectors can be stored so that they can be retrieved for a particular document. @@ -20,9 +22,11 @@ The `term_vector` setting accepts: `with_positions`:: Terms and positions are stored. `with_offsets`:: Terms and character offsets are stored. `with_positions_offsets`:: Terms, positions, and character offsets are stored. +`with_positions_payloads`:: Terms, positions, and payloads are stored. +`with_positions_offsets_payloads`:: Terms, positions, offsets and payloads are stored. -The fast vector highlighter requires `with_positions_offsets`. 
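As a sketch of how these settings are used together (index and field names are hypothetical), the mapping below stores the richest variant and the term vectors API reads it back:

[source,js]
--------------------------------------------------
PUT my_index
{
  "mappings": {
    "properties": {
      "text": {
        "type": "text",
        "term_vector": "with_positions_offsets_payloads"
      }
    }
  }
}

PUT my_index/_doc/1?refresh=true
{
  "text": "Quick brown fox"
}

GET my_index/_termvectors/1
{
  "fields": [ "text" ],
  "positions": true,
  "offsets": true,
  "payloads": true
}
--------------------------------------------------
// CONSOLE

Note that the standard analysis chain attaches no payloads, so for this document the response simply omits them; payloads appear only when a token filter (for example `delimited_payload`) produces them.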
The term -vectors API can retrieve whatever is stored. +The fast vector highlighter requires `with_positions_offsets`. +<> can retrieve whatever is stored. WARNING: Setting `with_positions_offsets` will double the size of a field's index. diff --git a/docs/reference/mapping/types.asciidoc b/docs/reference/mapping/types.asciidoc index b3ad39677d863..6794118537cab 100644 --- a/docs/reference/mapping/types.asciidoc +++ b/docs/reference/mapping/types.asciidoc @@ -30,7 +30,7 @@ string:: <> and <> === Specialised datatypes <>:: `ip` for IPv4 and IPv6 addresses -<>:: +<>:: `completion` to provide auto-complete suggestions <>:: `token_count` to count the number of tokens in a string {plugins}/mapper-murmur3.html[`mapper-murmur3`]:: `murmur3` to compute hashes of values at index-time and store them in the index @@ -81,14 +81,14 @@ include::types/array.asciidoc[] include::types/binary.asciidoc[] -include::types/range.asciidoc[] - include::types/boolean.asciidoc[] include::types/date.asciidoc[] include::types/date_nanos.asciidoc[] +include::types/dense-vector.asciidoc[] + include::types/flattened.asciidoc[] include::types/geo-point.asciidoc[] @@ -97,6 +97,8 @@ include::types/geo-shape.asciidoc[] include::types/ip.asciidoc[] +include::types/parent-join.asciidoc[] + include::types/keyword.asciidoc[] include::types/nested.asciidoc[] @@ -105,20 +107,18 @@ include::types/numeric.asciidoc[] include::types/object.asciidoc[] -include::types/text.asciidoc[] - -include::types/token-count.asciidoc[] - include::types/percolator.asciidoc[] -include::types/parent-join.asciidoc[] +include::types/range.asciidoc[] include::types/rank-feature.asciidoc[] include::types/rank-features.asciidoc[] -include::types/dense-vector.asciidoc[] +include::types/search-as-you-type.asciidoc[] include::types/sparse-vector.asciidoc[] -include::types/search-as-you-type.asciidoc[] +include::types/text.asciidoc[] + +include::types/token-count.asciidoc[] \ No newline at end of file diff --git a/docs/reference/mapping/types/alias.asciidoc b/docs/reference/mapping/types/alias.asciidoc index c70d96a7e572d..89229ce0bb9eb 100644 --- a/docs/reference/mapping/types/alias.asciidoc +++ b/docs/reference/mapping/types/alias.asciidoc @@ -1,5 +1,8 @@ [[alias]] === Alias datatype +++++ +Alias +++++ An `alias` mapping defines an alternate name for a field in the index. The alias can be used in place of the target field in <> requests, diff --git a/docs/reference/mapping/types/binary.asciidoc b/docs/reference/mapping/types/binary.asciidoc index 22e107dab565d..41478d1965a61 100644 --- a/docs/reference/mapping/types/binary.asciidoc +++ b/docs/reference/mapping/types/binary.asciidoc @@ -1,5 +1,8 @@ [[binary]] === Binary datatype +++++ +Binary +++++ The `binary` type accepts a binary value as a https://en.wikipedia.org/wiki/Base64[Base64] encoded string. 
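For instance, a minimal sketch (names are hypothetical; the example value is the Base64 encoding of "Some binary blob"):

[source,js]
--------------------------------------------------
PUT my_index
{
  "mappings": {
    "properties": {
      "blob": {
        "type": "binary"
      }
    }
  }
}

PUT my_index/_doc/1
{
  "blob": "U29tZSBiaW5hcnkgYmxvYg==" <1>
}
--------------------------------------------------
// CONSOLE
<1> The value must be valid Base64 without embedded newlines.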
The field is not diff --git a/docs/reference/mapping/types/boolean.asciidoc b/docs/reference/mapping/types/boolean.asciidoc index 962022060b65b..790b5013d1af0 100644 --- a/docs/reference/mapping/types/boolean.asciidoc +++ b/docs/reference/mapping/types/boolean.asciidoc @@ -1,5 +1,8 @@ [[boolean]] === Boolean datatype +++++ +Boolean +++++ Boolean fields accept JSON `true` and `false` values, but can also accept strings which are interpreted as either true or false: diff --git a/docs/reference/mapping/types/date.asciidoc b/docs/reference/mapping/types/date.asciidoc index 94aadb46fb2b6..3a5c480584613 100644 --- a/docs/reference/mapping/types/date.asciidoc +++ b/docs/reference/mapping/types/date.asciidoc @@ -1,5 +1,8 @@ [[date]] === Date datatype +++++ +Date +++++ JSON doesn't have a date datatype, so dates in Elasticsearch can either be: diff --git a/docs/reference/mapping/types/date_nanos.asciidoc b/docs/reference/mapping/types/date_nanos.asciidoc index 45d53a19b72f1..5fb84068bd23e 100644 --- a/docs/reference/mapping/types/date_nanos.asciidoc +++ b/docs/reference/mapping/types/date_nanos.asciidoc @@ -1,5 +1,8 @@ [[date_nanos]] -=== date_nanos datatype +=== Date nanoseconds datatype +++++ +Date nanoseconds +++++ This datatype is an addition to the `date` datatype. However there is an important distinction between the two. The existing `date` datatype stores diff --git a/docs/reference/mapping/types/dense-vector.asciidoc b/docs/reference/mapping/types/dense-vector.asciidoc index 7ea04b42330c2..9462fe544af9d 100644 --- a/docs/reference/mapping/types/dense-vector.asciidoc +++ b/docs/reference/mapping/types/dense-vector.asciidoc @@ -2,6 +2,9 @@ [testenv="basic"] [[dense-vector]] === Dense vector datatype +++++ +Dense vector +++++ experimental[] diff --git a/docs/reference/mapping/types/flattened.asciidoc b/docs/reference/mapping/types/flattened.asciidoc index 80fd72c3dcc1f..317fd54c1a090 100644 --- a/docs/reference/mapping/types/flattened.asciidoc +++ b/docs/reference/mapping/types/flattened.asciidoc @@ -3,6 +3,9 @@ [[flattened]] === Flattened datatype +++++ +Flattened +++++ By default, each subfield in an object is mapped and indexed separately. If the names or types of the subfields are not known in advance, then they are diff --git a/docs/reference/mapping/types/geo-point.asciidoc b/docs/reference/mapping/types/geo-point.asciidoc index 51e137fbc33b6..4c21bebc8fe46 100644 --- a/docs/reference/mapping/types/geo-point.asciidoc +++ b/docs/reference/mapping/types/geo-point.asciidoc @@ -1,5 +1,8 @@ [[geo-point]] === Geo-point datatype +++++ +Geo-point +++++ Fields of type `geo_point` accept latitude-longitude pairs, which can be used: @@ -11,7 +14,7 @@ Fields of type `geo_point` accept latitude-longitude pairs, which can be used: * to integrate distance into a document's <>. * to <> documents by distance. -There are four ways that a geo-point may be specified, as demonstrated below: +There are five ways that a geo-point may be specified, as demonstrated below: [source,js] -------------------------------------------------- @@ -53,10 +56,16 @@ PUT my_index/_doc/4 "location": [ -71.34, 41.12 ] <4> } +PUT my_index/_doc/5 +{ + "text": "Geo-point as a WKT POINT primitive", + "location" : "POINT (-71.34 41.12)" <5> +} + GET my_index/_search { "query": { - "geo_bounding_box": { <5> + "geo_bounding_box": { <6> "location": { "top_left": { "lat": 42, @@ -76,7 +85,9 @@ GET my_index/_search <2> Geo-point expressed as a string with the format: `"lat,lon"`. <3> Geo-point expressed as a geohash. 
<4> Geo-point expressed as an array with the format: [ `lon`, `lat`] -<5> A geo-bounding box query which finds all geo-points that fall inside the box. +<5> Geo-point expressed as a http://docs.opengeospatial.org/is/12-063r5/12-063r5.html[Well-Known Text] +POINT with the format: `"POINT(lon lat)"` +<6> A geo-bounding box query which finds all geo-points that fall inside the box. [IMPORTANT] .Geo-points expressed as an array or string diff --git a/docs/reference/mapping/types/geo-shape.asciidoc b/docs/reference/mapping/types/geo-shape.asciidoc index be5308db41a3d..0fd370ec4e835 100644 --- a/docs/reference/mapping/types/geo-shape.asciidoc +++ b/docs/reference/mapping/types/geo-shape.asciidoc @@ -1,5 +1,8 @@ [[geo-shape]] -=== Geo-Shape datatype +=== Geo-shape datatype +++++ +Geo-shape +++++ The `geo_shape` datatype facilitates the indexing of and searching with arbitrary geo shapes such as rectangles and polygons. It should be diff --git a/docs/reference/mapping/types/ip.asciidoc b/docs/reference/mapping/types/ip.asciidoc index 46ca444c9558d..a1a56cf69fd07 100644 --- a/docs/reference/mapping/types/ip.asciidoc +++ b/docs/reference/mapping/types/ip.asciidoc @@ -1,5 +1,8 @@ [[ip]] === IP datatype +++++ +IP +++++ An `ip` field can index/store either https://en.wikipedia.org/wiki/IPv4[IPv4] or https://en.wikipedia.org/wiki/IPv6[IPv6] addresses. diff --git a/docs/reference/mapping/types/keyword.asciidoc b/docs/reference/mapping/types/keyword.asciidoc index 8ac0983dc9550..61a603b4f2d1f 100644 --- a/docs/reference/mapping/types/keyword.asciidoc +++ b/docs/reference/mapping/types/keyword.asciidoc @@ -1,5 +1,8 @@ [[keyword]] === Keyword datatype +++++ +Keyword +++++ A field to index structured content such as email addresses, hostnames, status codes, zip codes or tags. diff --git a/docs/reference/mapping/types/nested.asciidoc b/docs/reference/mapping/types/nested.asciidoc index 63bb4591369e5..3113e86599c9a 100644 --- a/docs/reference/mapping/types/nested.asciidoc +++ b/docs/reference/mapping/types/nested.asciidoc @@ -1,5 +1,8 @@ [[nested]] === Nested datatype +++++ +Nested +++++ The `nested` type is a specialised version of the <> datatype that allows arrays of objects to be indexed in a way that they can be queried @@ -171,8 +174,8 @@ For instance, if a string field within a nested document has during the highlighting, these offsets will not be available during the main highlighting phase. Instead, highlighting needs to be performed via <>. The same consideration applies when loading -fields during a search through <> -or <>. +fields during a search through <> +or <>. 
============================================= diff --git a/docs/reference/mapping/types/numeric.asciidoc b/docs/reference/mapping/types/numeric.asciidoc index f2977957ff463..7298b54873b90 100644 --- a/docs/reference/mapping/types/numeric.asciidoc +++ b/docs/reference/mapping/types/numeric.asciidoc @@ -1,5 +1,8 @@ [[number]] === Numeric datatypes +++++ +Numeric +++++ The following numeric types are supported: diff --git a/docs/reference/mapping/types/object.asciidoc b/docs/reference/mapping/types/object.asciidoc index f5b9a9df85617..e127415c6181c 100644 --- a/docs/reference/mapping/types/object.asciidoc +++ b/docs/reference/mapping/types/object.asciidoc @@ -1,5 +1,8 @@ [[object]] === Object datatype +++++ +Object +++++ JSON documents are hierarchical in nature: the document may contain inner objects which, in turn, may contain inner objects themselves: diff --git a/docs/reference/mapping/types/parent-join.asciidoc b/docs/reference/mapping/types/parent-join.asciidoc index 39bcaa96d7764..14c7b7b275891 100644 --- a/docs/reference/mapping/types/parent-join.asciidoc +++ b/docs/reference/mapping/types/parent-join.asciidoc @@ -1,5 +1,8 @@ [[parent-join]] -=== `join` datatype +=== Join datatype +++++ +Join +++++ The `join` datatype is a special field that creates parent/child relation within documents of the same index. diff --git a/docs/reference/mapping/types/percolator.asciidoc b/docs/reference/mapping/types/percolator.asciidoc index 00270fff38538..cdc10bcaa036a 100644 --- a/docs/reference/mapping/types/percolator.asciidoc +++ b/docs/reference/mapping/types/percolator.asciidoc @@ -1,5 +1,8 @@ [[percolator]] === Percolator type +++++ +Percolator +++++ The `percolator` field type parses a json structure into a native query and stores that query, so that the <> diff --git a/docs/reference/mapping/types/range.asciidoc b/docs/reference/mapping/types/range.asciidoc index 79c9e6629c696..91bbbd0d6d044 100644 --- a/docs/reference/mapping/types/range.asciidoc +++ b/docs/reference/mapping/types/range.asciidoc @@ -1,5 +1,8 @@ [[range]] === Range datatypes +++++ +Range +++++ The following range types are supported: diff --git a/docs/reference/mapping/types/rank-feature.asciidoc b/docs/reference/mapping/types/rank-feature.asciidoc index 780a68216f49e..d066d0452d353 100644 --- a/docs/reference/mapping/types/rank-feature.asciidoc +++ b/docs/reference/mapping/types/rank-feature.asciidoc @@ -1,5 +1,8 @@ [[rank-feature]] === Rank feature datatype +++++ +Rank feature +++++ A `rank_feature` field can index numbers so that they can later be used to boost documents in queries with a <> query. 
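For example, a sketch (field name hypothetical) that maps a `rank_feature` field and then uses it to boost matching documents:

[source,js]
--------------------------------------------------
PUT my_index
{
  "mappings": {
    "properties": {
      "pagerank": {
        "type": "rank_feature"
      }
    }
  }
}

PUT my_index/_doc/1?refresh=true
{
  "pagerank": 8.2
}

GET my_index/_search
{
  "query": {
    "rank_feature": {
      "field": "pagerank" <1>
    }
  }
}
--------------------------------------------------
// CONSOLE
<1> When no explicit function is given, the `rank_feature` query applies a default saturation function to the field value.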
diff --git a/docs/reference/mapping/types/rank-features.asciidoc b/docs/reference/mapping/types/rank-features.asciidoc index 9bc960b7f8351..b80db43651dd7 100644 --- a/docs/reference/mapping/types/rank-features.asciidoc +++ b/docs/reference/mapping/types/rank-features.asciidoc @@ -1,5 +1,8 @@ [[rank-features]] === Rank features datatype +++++ +Rank features +++++ A `rank_features` field can index numeric feature vectors, so that they can later be used to boost documents in queries with a diff --git a/docs/reference/mapping/types/search-as-you-type.asciidoc b/docs/reference/mapping/types/search-as-you-type.asciidoc index 7ddc4f12fad5c..21a1a46cf8c04 100644 --- a/docs/reference/mapping/types/search-as-you-type.asciidoc +++ b/docs/reference/mapping/types/search-as-you-type.asciidoc @@ -1,5 +1,8 @@ [[search-as-you-type]] -=== Search as you type datatype +=== Search-as-you-type datatype +++++ +Search-as-you-type +++++ The `search_as_you_type` field type is a text-like field that is optimized to provide out-of-the-box support for queries that serve an as-you-type completion diff --git a/docs/reference/mapping/types/sparse-vector.asciidoc b/docs/reference/mapping/types/sparse-vector.asciidoc index de63a1a822a0a..7b437031513b7 100644 --- a/docs/reference/mapping/types/sparse-vector.asciidoc +++ b/docs/reference/mapping/types/sparse-vector.asciidoc @@ -2,6 +2,9 @@ [testenv="basic"] [[sparse-vector]] === Sparse vector datatype +++++ +Sparse vector +++++ experimental[] diff --git a/docs/reference/mapping/types/text.asciidoc b/docs/reference/mapping/types/text.asciidoc index 8ddecd88d08a9..46477972d5cf9 100644 --- a/docs/reference/mapping/types/text.asciidoc +++ b/docs/reference/mapping/types/text.asciidoc @@ -1,5 +1,8 @@ [[text]] === Text datatype +++++ +Text +++++ A field to index full-text values, such as the body of an email or the description of a product. These fields are `analyzed`, that is they are passed through an diff --git a/docs/reference/mapping/types/token-count.asciidoc b/docs/reference/mapping/types/token-count.asciidoc index d574c25e93d19..a435be1e54d51 100644 --- a/docs/reference/mapping/types/token-count.asciidoc +++ b/docs/reference/mapping/types/token-count.asciidoc @@ -1,5 +1,8 @@ [[token-count]] === Token count datatype +++++ +Token count +++++ A field of type `token_count` is really an <> field which accepts string values, analyzes them, then indexes the number of tokens in the diff --git a/docs/reference/migration/apis/deprecation.asciidoc b/docs/reference/migration/apis/deprecation.asciidoc index b52079726d868..b9561dcd3b901 100644 --- a/docs/reference/migration/apis/deprecation.asciidoc +++ b/docs/reference/migration/apis/deprecation.asciidoc @@ -10,29 +10,23 @@ The deprecation API is to be used to retrieve information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version. -[float] -==== Request +[[migration-api-request]] +==== {api-request-title} `GET /_migration/deprecations` + `GET //_migration/deprecations` -//=== Description - -[float] -==== Path Parameters +[[migration-api-path-params]] +==== {api-path-parms-title} `index_name`:: - (string) Identifier for the index. It can be an index name or a wildcard - expression. When you specify this parameter, only index-level deprecations for - the specified indices are returned. - -//=== Query Parameters - -//=== Authorization + (Optional, string) Identifier for the index. It can be an index name or a + wildcard expression. 
When you specify this parameter, only index-level + deprecations for the specified indices are returned. -[float] -==== Examples +[[migration-api-example]] +==== {api-examples-title} To see the list of offenders in your cluster, submit a GET request to the `_migration/deprecations` endpoint: diff --git a/docs/reference/migration/migrate_8_0/http.asciidoc b/docs/reference/migration/migrate_8_0/http.asciidoc index 6209beeaac4e2..70ac03f1fcd0a 100644 --- a/docs/reference/migration/migrate_8_0/http.asciidoc +++ b/docs/reference/migration/migrate_8_0/http.asciidoc @@ -12,4 +12,12 @@ ==== Removal of old HTTP settings The `http.tcp_no_delay` setting was deprecated in 7.x and has been removed in 8.0. It has been replaced by -`http.tcp.no_delay`. \ No newline at end of file +`http.tcp.no_delay`. + +[float] +==== Changes to Encoding Plus Signs in URLs + +Starting in version 7.4, a `+` in a URL will be encoded as `%2B` by all REST API functionality. Prior versions handled a `+` as a single space. +If your application requires handling `+` as a single space you can return to the old behaviour by setting the system property +`es.rest.url_plus_as_space` to `true`. Note that this behaviour is deprecated and setting this system property to `true` will cease +to be supported in version 8. \ No newline at end of file diff --git a/docs/reference/migration/migrate_8_0/node.asciidoc b/docs/reference/migration/migrate_8_0/node.asciidoc index b1187e88b5d90..99131b307fa86 100644 --- a/docs/reference/migration/migrate_8_0/node.asciidoc +++ b/docs/reference/migration/migrate_8_0/node.asciidoc @@ -35,4 +35,12 @@ each of these folders to an appropriate location and then configure the corresponding node to use this location for its data path. If your nodes each have more than one data path in their `path.data` settings then you should move all the corresponding subfolders in parallel. Each node uses the same subfolder -(e.g. `nodes/2`) across all its data paths. \ No newline at end of file +(e.g. `nodes/2`) across all its data paths. + +[float] +==== Rejection of ancient closed indices + +In earlier versions a node would start up even if it had data from indices +created in a version before the previous major version, as long as those +indices were closed. {es} now ensures that it is compatible with every index, +open or closed, at startup time. diff --git a/docs/reference/ml/aggregations.asciidoc b/docs/reference/ml/anomaly-detection/aggregations.asciidoc similarity index 100% rename from docs/reference/ml/aggregations.asciidoc rename to docs/reference/ml/anomaly-detection/aggregations.asciidoc diff --git a/docs/reference/ml/apis/close-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/close-job.asciidoc similarity index 88% rename from docs/reference/ml/apis/close-job.asciidoc rename to docs/reference/ml/anomaly-detection/apis/close-job.asciidoc index 2a38648e48fe5..de64754b098d2 100644 --- a/docs/reference/ml/apis/close-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/close-job.asciidoc @@ -57,19 +57,19 @@ results the job might have recently produced or might produce in the future. [[ml-close-job-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the job. It can be a job identifier, a group name, or - a wildcard expression. +``:: + (Required, string) Identifier for the job. It can be a job identifier, a group + name, or a wildcard expression. 
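For example, closing a hypothetical job `total-requests` while capping the wait with the `timeout` query parameter described below:

[source,js]
--------------------------------------------------
POST _ml/anomaly_detectors/total-requests/_close?timeout=10m
--------------------------------------------------
// CONSOLE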
[[ml-close-job-query-parms]] ==== {api-query-parms-title} -`force` (Optional):: - (boolean) Use to close a failed job, or to forcefully close a job which has not - responded to its initial close request. +`force`:: + (Optional, boolean) Use to close a failed job, or to forcefully close a job + which has not responded to its initial close request. -`timeout` (Optional):: - (time units) Controls the time to wait until a job has closed. +`timeout`:: + (Optional, time units) Controls the time to wait until a job has closed. The default value is 30 minutes. [[ml-close-job-example]] diff --git a/docs/reference/ml/apis/datafeedresource.asciidoc b/docs/reference/ml/anomaly-detection/apis/datafeedresource.asciidoc similarity index 100% rename from docs/reference/ml/apis/datafeedresource.asciidoc rename to docs/reference/ml/anomaly-detection/apis/datafeedresource.asciidoc diff --git a/docs/reference/ml/apis/delete-calendar-event.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-calendar-event.asciidoc similarity index 84% rename from docs/reference/ml/apis/delete-calendar-event.asciidoc rename to docs/reference/ml/anomaly-detection/apis/delete-calendar-event.asciidoc index 0aa9ce5cc8d92..07e4906445b4c 100644 --- a/docs/reference/ml/apis/delete-calendar-event.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-calendar-event.asciidoc @@ -30,12 +30,12 @@ events and delete the calendar, see the [[ml-delete-calendar-event-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the calendar. +``:: + (Required, string) Identifier for the calendar. -`` (Required):: - (string) Identifier for the scheduled event. You can obtain this identifier - by using the <>. +``:: + (Required, string) Identifier for the scheduled event. You can obtain this + identifier by using the <>. [[ml-delete-calendar-event-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/delete-calendar-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-calendar-job.asciidoc similarity index 84% rename from docs/reference/ml/apis/delete-calendar-job.asciidoc rename to docs/reference/ml/anomaly-detection/apis/delete-calendar-job.asciidoc index a555b3d3b922b..096918a821195 100644 --- a/docs/reference/ml/apis/delete-calendar-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-calendar-job.asciidoc @@ -23,12 +23,12 @@ Deletes jobs from a calendar. [[ml-delete-calendar-job-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the calendar. +``:: + (Required, string) Identifier for the calendar. -`` (Required):: - (string) An identifier for the job. It can be a job identifier, a group name, - or a comma-separated list of jobs or groups. +``:: + (Required, string) An identifier for the job. It can be a job identifier, a + group name, or a comma-separated list of jobs or groups. [[ml-delete-calendar-job-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/delete-calendar.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-calendar.asciidoc similarity index 94% rename from docs/reference/ml/apis/delete-calendar.asciidoc rename to docs/reference/ml/anomaly-detection/apis/delete-calendar.asciidoc index 065c117c49c63..c38a15f1d11e5 100644 --- a/docs/reference/ml/apis/delete-calendar.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-calendar.asciidoc @@ -29,8 +29,8 @@ calendar. [[ml-delete-calendar-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the calendar. 
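As a sketch with hypothetical identifiers (`planned-outages`, `total-requests`), first detaching a job from a calendar and then deleting the calendar itself:

[source,js]
--------------------------------------------------
DELETE _ml/calendars/planned-outages/jobs/total-requests

DELETE _ml/calendars/planned-outages
--------------------------------------------------
// CONSOLE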
+``:: + (Required, string) Identifier for the calendar. [[ml-delete-calendar-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/delete-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-datafeed.asciidoc similarity index 76% rename from docs/reference/ml/apis/delete-datafeed.asciidoc rename to docs/reference/ml/anomaly-detection/apis/delete-datafeed.asciidoc index 23917bf9e3365..92a9a9dc82170 100644 --- a/docs/reference/ml/apis/delete-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-datafeed.asciidoc @@ -18,28 +18,24 @@ Deletes an existing {dfeed}. [[ml-delete-datafeed-prereqs]] ==== {api-prereq-title} +* Unless you use the `force` parameter, you must stop the {dfeed} before you +can delete it. * If the {es} {security-features} are enabled, you must have `manage_ml` or `manage` cluster privileges to use this API. See {stack-ov}/security-privileges.html[Security privileges]. -[[ml-delete-datafeed-desc]] -==== {api-description-title} - -NOTE: Unless you use the `force` parameter, you must stop the {dfeed} before you -can delete it. - [[ml-delete-datafeed-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the {dfeed}. +``:: + (Required, string) Identifier for the {dfeed}. [[ml-delete-datafeed-query-parms]] ==== {api-query-parms-title} -`force` (Optional):: - (boolean) Use to forcefully delete a started {dfeed}; this method is quicker - than stopping and deleting the {dfeed}. +`force`:: + (Optional, boolean) Use to forcefully delete a started {dfeed}; this method is + quicker than stopping and deleting the {dfeed}. [[ml-delete-datafeed-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/delete-expired-data.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-expired-data.asciidoc similarity index 100% rename from docs/reference/ml/apis/delete-expired-data.asciidoc rename to docs/reference/ml/anomaly-detection/apis/delete-expired-data.asciidoc diff --git a/docs/reference/ml/apis/delete-filter.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-filter.asciidoc similarity index 94% rename from docs/reference/ml/apis/delete-filter.asciidoc rename to docs/reference/ml/anomaly-detection/apis/delete-filter.asciidoc index 1962db29ad74e..75fe2533b88c2 100644 --- a/docs/reference/ml/apis/delete-filter.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-filter.asciidoc @@ -30,8 +30,8 @@ update or delete the job before you can delete the filter. [[ml-delete-filter-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the filter. +``:: + (Required, string) Identifier for the filter. [[ml-delete-filter-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/delete-forecast.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-forecast.asciidoc similarity index 71% rename from docs/reference/ml/apis/delete-forecast.asciidoc rename to docs/reference/ml/anomaly-detection/apis/delete-forecast.asciidoc index aac054217fced..044bf8a3ea055 100644 --- a/docs/reference/ml/apis/delete-forecast.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-forecast.asciidoc @@ -40,28 +40,28 @@ For more information, see [[ml-delete-forecast-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the job. - -`forecast_id` (Optional):: - (string) A comma-separated list of forecast identifiers. +``:: + (Optional, string) A comma-separated list of forecast identifiers. 
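For example, using the `force` parameter described above to delete a started {dfeed} (identifier hypothetical) without stopping it first:

[source,js]
--------------------------------------------------
DELETE _ml/datafeeds/datafeed-total-requests?force=true
--------------------------------------------------
// CONSOLE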
If you do not specify this optional parameter or if you specify `_all`, the API deletes all forecasts from the job. + +``:: + (Required, string) Identifier for the job. [[ml-delete-forecast-query-parms]] ==== {api-query-parms-title} -`allow_no_forecasts` (Optional):: - (boolean) Specifies whether an error occurs when there are no forecasts. In - particular, if this parameter is set to `false` and there are no forecasts - associated with the job, attempts to delete all forecasts return an error. - The default value is `true`. - -`timeout` (Optional):: - (time units) Specifies the period of time to wait for the completion of the - delete operation. When this period of time elapses, the API fails and returns - an error. The default value is `30s`. For more information about time units, - see <>. +`allow_no_forecasts`:: + (Optional, boolean) Specifies whether an error occurs when there are no + forecasts. In particular, if this parameter is set to `false` and there are no + forecasts associated with the job, attempts to delete all forecasts return an + error. The default value is `true`. + +`timeout`:: + (Optional, time units) Specifies the period of time to wait for the completion + of the delete operation. When this period of time elapses, the API fails and + returns an error. The default value is `30s`. For more information about time + units, see <>. [[ml-delete-forecast-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/delete-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-job.asciidoc similarity index 86% rename from docs/reference/ml/apis/delete-job.asciidoc rename to docs/reference/ml/anomaly-detection/apis/delete-job.asciidoc index efd172ef5fb60..bd5a74e50e89a 100644 --- a/docs/reference/ml/apis/delete-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-job.asciidoc @@ -40,19 +40,19 @@ separated list. [[ml-delete-job-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the job. +``:: + (Required, string) Identifier for the job. [[ml-delete-job-query-parms]] ==== {api-query-parms-title} -`force` (Optional):: - (boolean) Use to forcefully delete an opened job; this method is quicker than - closing and deleting the job. +`force`:: + (Optional, boolean) Use to forcefully delete an opened job; this method is + quicker than closing and deleting the job. -`wait_for_completion` (Optional):: - (boolean) Specifies whether the request should return immediately or wait - until the job deletion completes. Defaults to `true`. +`wait_for_completion`:: + (Optional, boolean) Specifies whether the request should return immediately or + wait until the job deletion completes. Defaults to `true`. [[ml-delete-job-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/delete-snapshot.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-snapshot.asciidoc similarity index 91% rename from docs/reference/ml/apis/delete-snapshot.asciidoc rename to docs/reference/ml/anomaly-detection/apis/delete-snapshot.asciidoc index 0e696f2a01139..62a223c4447a0 100644 --- a/docs/reference/ml/apis/delete-snapshot.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-snapshot.asciidoc @@ -30,11 +30,11 @@ the `model_snapshot_id` in the results from the get jobs API. [[ml-delete-snapshot-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the job. +``:: + (Required, string) Identifier for the job. -`` (Required):: - (string) Identifier for the model snapshot.
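For example, deleting a hypothetical job without waiting; with `wait_for_completion=false` the request returns immediately and the deletion continues in the background:

[source,js]
--------------------------------------------------
DELETE _ml/anomaly_detectors/total-requests?wait_for_completion=false
--------------------------------------------------
// CONSOLE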
+``:: + (Required, string) Identifier for the model snapshot. [[ml-delete-snapshot-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/eventresource.asciidoc b/docs/reference/ml/anomaly-detection/apis/eventresource.asciidoc similarity index 100% rename from docs/reference/ml/apis/eventresource.asciidoc rename to docs/reference/ml/anomaly-detection/apis/eventresource.asciidoc diff --git a/docs/reference/ml/apis/filterresource.asciidoc b/docs/reference/ml/anomaly-detection/apis/filterresource.asciidoc similarity index 90% rename from docs/reference/ml/apis/filterresource.asciidoc rename to docs/reference/ml/anomaly-detection/apis/filterresource.asciidoc index a9748949ffd58..520a2a99a3c71 100644 --- a/docs/reference/ml/apis/filterresource.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/filterresource.asciidoc @@ -9,7 +9,7 @@ A filter resource has the following properties: (string) A string that uniquely identifies the filter. `description`:: - (array) A description of the filter. + (string) A description of the filter. `items`:: (array of strings) An array of strings which is the filter item list. diff --git a/docs/reference/ml/apis/find-file-structure.asciidoc b/docs/reference/ml/anomaly-detection/apis/find-file-structure.asciidoc similarity index 91% rename from docs/reference/ml/apis/find-file-structure.asciidoc rename to docs/reference/ml/anomaly-detection/apis/find-file-structure.asciidoc index 212e80c7e1bd2..961927ed81e63 100644 --- a/docs/reference/ml/apis/find-file-structure.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/find-file-structure.asciidoc @@ -58,62 +58,64 @@ chosen. [[ml-find-file-structure-query-parms]] ==== {api-query-parms-title} -`charset` (Optional):: - (string) The file's character set. It must be a character set that is supported - by the JVM that {es} uses. For example, `UTF-8`, `UTF-16LE`, `windows-1252`, or - `EUC-JP`. If this parameter is not specified, the structure finder chooses an - appropriate character set. - -`column_names` (Optional):: - (string) If you have set `format` to `delimited`, you can specify the column names - in a comma-separated list. If this parameter is not specified, the structure - finder uses the column names from the header row of the file. If the file does - not have a header role, columns are named "column1", "column2", "column3", etc. - -`delimiter` (Optional):: - (string) If you have set `format` to `delimited`, you can specify the character used - to delimit the values in each row. Only a single character is supported; the - delimiter cannot have multiple characters. If this parameter is not specified, - the structure finder considers the following possibilities: comma, tab, - semi-colon, and pipe (`|`). - -`explain` (Optional):: - (boolean) If this parameter is set to `true`, the response includes a field - named `explanation`, which is an array of strings that indicate how the - structure finder produced its result. The default value is `false`. - -`format` (Optional):: - (string) The high level structure of the file. Valid values are `ndjson`, `xml`, - `delimited`, and `semi_structured_text`. If this parameter is not specified, - the structure finder chooses one. - -`grok_pattern` (Optional):: - (string) If you have set `format` to `semi_structured_text`, you can specify a Grok - pattern that is used to extract fields from every message in the file. The - name of the timestamp field in the Grok pattern must match what is specified - in the `timestamp_field` parameter. 
If that parameter is not specified, the - name of the timestamp field in the Grok pattern must match "timestamp". If - `grok_pattern` is not specified, the structure finder creates a Grok pattern. - -`has_header_row` (Optional):: - (boolean) If you have set `format` to `delimited`, you can use this parameter to - indicate whether the column names are in the first row of the file. If this - parameter is not specified, the structure finder guesses based on the similarity of - the first row of the file to other rows. - -`line_merge_size_limit` (Optional):: - (unsigned integer) The maximum number of characters in a message when lines are - merged to form messages while analyzing semi-structured files. The default - is 10000. If you have extremely long messages you may need to increase this, but - be aware that this may lead to very long processing times if the way to group - lines into messages is misdetected. - -`lines_to_sample` (Optional):: - (unsigned integer) The number of lines to include in the structural analysis, - starting from the beginning of the file. The minimum is 2; the default - is 1000. If the value of this parameter is greater than the number of lines in - the file, the analysis proceeds (as long as there are at least two lines in the - file) for all of the lines. + +`charset`:: + (string) Optional. The file's character set. It must be a character set that + is supported by the JVM that {es} uses. For example, `UTF-8`, `UTF-16LE`, + `windows-1252`, or `EUC-JP`. If this parameter is not specified, the structure + finder chooses an appropriate character set. + +`column_names`:: + (string) Optional. If you have set `format` to `delimited`, you can specify + the column names in a comma-separated list. If this parameter is not specified, + the structure finder uses the column names from the header row of the file. If + the file does not have a header row, columns are named "column1", "column2", + "column3", etc. + +`delimiter`:: + (string) Optional. If you have set `format` to `delimited`, you can specify + the character used to delimit the values in each row. Only a single character + is supported; the delimiter cannot have multiple characters. If this parameter + is not specified, the structure finder considers the following possibilities: + comma, tab, semi-colon, and pipe (`|`). + +`explain`:: + (boolean) Optional. If this parameter is set to `true`, the response includes + a field named `explanation`, which is an array of strings that indicate how + the structure finder produced its result. The default value is `false`. + +`format`:: + (string) Optional. The high level structure of the file. Valid values are + `ndjson`, `xml`, `delimited`, and `semi_structured_text`. If this parameter is + not specified, the structure finder chooses one. + +`grok_pattern`:: + (string) Optional. If you have set `format` to `semi_structured_text`, you can + specify a Grok pattern that is used to extract fields from every message in + the file. The name of the timestamp field in the Grok pattern must match what + is specified in the `timestamp_field` parameter. If that parameter is not + specified, the name of the timestamp field in the Grok pattern must match + "timestamp". If `grok_pattern` is not specified, the structure finder creates + a Grok pattern. + +`has_header_row`:: + (boolean) Optional. If you have set `format` to `delimited`, you can use this + parameter to indicate whether the column names are in the first row of the + file.
If this parameter is not specified, the structure finder guesses based + on the similarity of + the first row of the file to other rows. + +`line_merge_size_limit`:: + (unsigned integer) Optional. The maximum number of characters in a message + when lines are merged to form messages while analyzing semi-structured files. + The default is `10000`. If you have extremely long messages you may need to + increase this, but be aware that this may lead to very long processing times + if the way to group lines into messages is misdetected. + +`lines_to_sample`:: + (unsigned integer) Optional. The number of lines to include in the structural + analysis, starting from the beginning of the file. The minimum is 2; the + default is `1000`. If the value of this parameter is greater than the number + of lines in the file, the analysis proceeds (as long as there are at least two + lines in the file) for all of the lines. + + -- NOTE: The number of lines and the variation of the lines affects the speed of @@ -124,29 +126,29 @@ efficient to upload a sample file with more variety in the first 1000 lines than to request analysis of 100000 lines to achieve some variety. -- -`quote` (Optional):: - (string) If you have set `format` to `delimited`, you can specify the character used - to quote the values in each row if they contain newlines or the delimiter - character. Only a single character is supported. If this parameter is not - specified, the default value is a double quote (`"`). If your delimited file - format does not use quoting, a workaround is to set this argument to a - character that does not appear anywhere in the sample. - -`should_trim_fields` (Optional):: - (boolean) If you have set `format` to `delimited`, you can specify whether values - between delimiters should have whitespace trimmed from them. If this parameter - is not specified and the delimiter is pipe (`|`), the default value is `true`. - Otherwise, the default value is `false`. - -`timeout` (Optional):: - (time) Sets the maximum amount of time that the structure analysis make take. - If the analysis is still running when the timeout expires then it will be - aborted. The default value is 25 seconds. - -`timestamp_field` (Optional):: - (string) The name of the field that contains the primary timestamp of each - record in the file. In particular, if the file were ingested into an index, - this is the field that would be used to populate the `@timestamp` field. + +`quote`:: + (string) Optional. If you have set `format` to `delimited`, you can specify + the character used to quote the values in each row if they contain newlines or + the delimiter character. Only a single character is supported. If this + parameter is not specified, the default value is a double quote (`"`). If your + delimited file format does not use quoting, a workaround is to set this + argument to a character that does not appear anywhere in the sample. + +`should_trim_fields`:: + (boolean) Optional. If you have set `format` to `delimited`, you can specify + whether values between delimiters should have whitespace trimmed from them. If + this parameter is not specified and the delimiter is pipe (`|`), the default + value is `true`. Otherwise, the default value is `false`. + +`timeout`:: + (time) Optional. Sets the maximum amount of time that the structure analysis + may take. If the analysis is still running when the timeout expires then it + will be aborted. The default value is 25 seconds. + +`timestamp_field`:: + (string) Optional.
The name of the field that contains the primary timestamp + of each record in the file. In particular, if the file were ingested into an + index, this is the field that would be used to populate the `@timestamp` field. + -- If the `format` is `semi_structured_text`, this field must match the name of the @@ -162,8 +164,8 @@ field (if any) is the primary timestamp field. For structured file formats, it is not compulsory to have a timestamp in the file. -- -`timestamp_format` (Optional):: - (string) The Java time format of the timestamp field in the file. + +`timestamp_format`:: + (string) Optional. The Java time format of the timestamp field in the file. + + -- NOTE: Only a subset of Java time format letter groups are supported: diff --git a/docs/reference/ml/apis/flush-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/flush-job.asciidoc similarity index 77% rename from docs/reference/ml/apis/flush-job.asciidoc rename to docs/reference/ml/anomaly-detection/apis/flush-job.asciidoc index 590f866ca1799..47b961b1d2a49 100644 --- a/docs/reference/ml/apis/flush-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/flush-job.asciidoc @@ -36,32 +36,32 @@ opened again before analyzing further data. [[ml-flush-job-path-parms]] ==== {api-path-parms-title} -`` (Required):: -(string) Identifier for the job. +``:: +(string) Required. Identifier for the job. [[ml-flush-job-query-parms]] ==== {api-query-parms-title} -`advance_time` (Optional):: - (string) Specifies to advance to a particular time value. Results are +`advance_time`:: + (string) Optional. Specifies to advance to a particular time value. Results are generated and the model is updated for data from the specified time interval. -`calc_interim` (Optional):: - (boolean) If true, calculates the interim results for the most recent bucket - or all buckets within the latency period. +`calc_interim`:: + (boolean) Optional. If true, calculates the interim results for the most + recent bucket or all buckets within the latency period. -`end` (Optional):: - (string) When used in conjunction with `calc_interim`, specifies the range - of buckets on which to calculate interim results. +`end`:: + (string) Optional. When used in conjunction with `calc_interim`, specifies the + range of buckets on which to calculate interim results. -`skip_time` (Optional):: - (string) Specifies to skip to a particular time value. Results are not - generated and the model is not updated for data from the specified time +`skip_time`:: + (string) Optional. Specifies to skip to a particular time value. Results are + not generated and the model is not updated for data from the specified time interval. -`start` (Optional):: - (string) When used in conjunction with `calc_interim`, specifies the range of - buckets on which to calculate interim results. +`start`:: + (string) Optional. When used in conjunction with `calc_interim`, specifies the + range of buckets on which to calculate interim results. [[ml-flush-job-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/forecast.asciidoc b/docs/reference/ml/anomaly-detection/apis/forecast.asciidoc similarity index 86% rename from docs/reference/ml/apis/forecast.asciidoc rename to docs/reference/ml/anomaly-detection/apis/forecast.asciidoc index d137b2e1be3ce..01ef91228f290 100644 --- a/docs/reference/ml/apis/forecast.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/forecast.asciidoc @@ -36,20 +36,20 @@ forecast. For more information about this property, see <>. 
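For example, a sketch (job identifier hypothetical) that flushes a job and uses `calc_interim` to request interim results for the most recent buckets:

[source,js]
--------------------------------------------------
POST _ml/anomaly_detectors/total-requests/_flush
{
  "calc_interim": true
}
--------------------------------------------------
// CONSOLE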
[[ml-forecast-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the job. +``:: + (Required, string) Identifier for the job. [[ml-forecast-request-body]] ==== {api-request-body-title} -`duration` (Optional):: - (time units) A period of time that indicates how far into the future to - forecast. For example, `30d` corresponds to 30 days. The default value is 1 +`duration`:: + (Optional, time units) A period of time that indicates how far into the future + to forecast. For example, `30d` corresponds to 30 days. The default value is 1 day. The forecast starts at the last record that was processed. For more information about time units, see <>. -`expires_in` (Optional):: - (time units) The period of time that forecast results are retained. +`expires_in`:: + (Optional, time units) The period of time that forecast results are retained. After a forecast expires, the results are deleted. The default value is 14 days. If set to a value of `0`, the forecast is never automatically deleted. For more information about time units, see <>. diff --git a/docs/reference/ml/apis/get-bucket.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-bucket.asciidoc similarity index 71% rename from docs/reference/ml/apis/get-bucket.asciidoc rename to docs/reference/ml/anomaly-detection/apis/get-bucket.asciidoc index 2a73d0f5d3538..f7835f6c75821 100644 --- a/docs/reference/ml/apis/get-bucket.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-bucket.asciidoc @@ -35,45 +35,45 @@ bucket. [[ml-get-bucket-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the job +``:: + (Required, string) Identifier for the job. -`` (Optional):: - (string) The timestamp of a single bucket result. - If you do not specify this parameter, the API returns information - about all buckets. +``:: + (Optional, string) The timestamp of a single bucket result. If you do not + specify this parameter, the API returns information about all buckets. [[ml-get-bucket-request-body]] ==== {api-request-body-title} -`anomaly_score` (Optional):: - (double) Returns buckets with anomaly scores greater or equal than this value. +`anomaly_score`:: + (Optional, double) Returns buckets with anomaly scores greater than or equal + to this value. -`desc` (Optional):: - (boolean) If true, the buckets are sorted in descending order. +`desc`:: + (Optional, boolean) If true, the buckets are sorted in descending order. -`end` (Optional):: - (string) Returns buckets with timestamps earlier than this time. +`end`:: + (Optional, string) Returns buckets with timestamps earlier than this time. -`exclude_interim` (Optional):: - (boolean) If true, the output excludes interim results. - By default, interim results are included. +`exclude_interim`:: + (Optional, boolean) If true, the output excludes interim results. By default, + interim results are included. -`expand` (Optional):: - (boolean) If true, the output includes anomaly records. +`expand`:: + (Optional, boolean) If true, the output includes anomaly records. -`page` (Optional):: +`page`:: `from`::: - (integer) Skips the specified number of buckets. + (Optional, integer) Skips the specified number of buckets. `size`::: - (integer) Specifies the maximum number of buckets to obtain. + (Optional, integer) Specifies the maximum number of buckets to obtain. -`sort` (Optional):: - (string) Specifies the sort field for the requested buckets. - By default, the buckets are sorted by the `timestamp` field.
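Combining the two body parameters above into one sketch (job identifier hypothetical): request a 10-day forecast and retain its results for 30 days:

[source,js]
--------------------------------------------------
POST _ml/anomaly_detectors/total-requests/_forecast
{
  "duration": "10d",
  "expires_in": "30d"
}
--------------------------------------------------
// CONSOLE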
+`sort`:: + (Optional, string) Specifies the sort field for the requested buckets. By + default, the buckets are sorted by the `timestamp` field. -`start` (Optional):: - (string) Returns buckets with timestamps after this time. +`start`:: + (Optional, string) Returns buckets with timestamps after this time. [[ml-get-bucket-results]] ==== {api-response-body-title} diff --git a/docs/reference/ml/apis/get-calendar-event.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-calendar-event.asciidoc similarity index 64% rename from docs/reference/ml/apis/get-calendar-event.asciidoc rename to docs/reference/ml/anomaly-detection/apis/get-calendar-event.asciidoc index 173a249488684..8ea7ad2af7ad7 100644 --- a/docs/reference/ml/apis/get-calendar-event.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-calendar-event.asciidoc @@ -6,8 +6,7 @@ Get scheduled events ++++ -Retrieves information about the scheduled events in -calendars. +Retrieves information about the scheduled events in calendars. [[ml-get-calendar-event-request]] ==== {api-request-title} @@ -29,26 +28,30 @@ calendars. You can get scheduled event information for a single calendar or for all calendars by using `_all`. +For more information, see +{stack-ov}/ml-calendars.html[Calendars and scheduled events]. + [[ml-get-calendar-event-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the calendar. +``:: + (Required, string) Identifier for the calendar. [[ml-get-calendar-event-request-body]] ==== {api-request-body-title} -`end` (Optional):: - (string) Specifies to get events with timestamps earlier than this time. +`end`:: + (Optional, string) Specifies to get events with timestamps earlier than this + time. -`from` (Optional):: - (integer) Skips the specified number of events. +`from`:: + (Optional, integer) Skips the specified number of events. -`size` (Optional):: - (integer) Specifies the maximum number of events to obtain. +`size`:: + (Optional, integer) Specifies the maximum number of events to obtain. -`start` (Optional):: - (string) Specifies to get events with timestamps after this time. +`start`:: + (Optional, string) Specifies to get events with timestamps after this time. [[ml-get-calendar-event-results]] ==== {api-response-body-title} @@ -56,8 +59,26 @@ calendars by using `_all`. The API returns the following information: `events`:: - (array) An array of scheduled event resources. - For more information, see <>. + (array) An array of scheduled event resources. An events resource has the + following properties: + + `calendar_id`::: + (string) An identifier for the calendar that contains the scheduled + event. + + `description`::: + (string) A description of the scheduled event. + + `end_time`::: + (date) The timestamp for the end of the scheduled event + in milliseconds since the epoch or ISO 8601 format. + + `event_id`::: + (string) An automatically-generated identifier for the scheduled event. + + `start_time`::: + (date) The timestamp for the beginning of the scheduled event + in milliseconds since the epoch or ISO 8601 format. [[ml-get-calendar-event-example]] ==== {api-examples-title} @@ -106,5 +127,3 @@ The API returns the following results: // TESTRESPONSE[s/LS8LJGEBMTCMA-qz49st/$body.$_path/] // TESTRESPONSE[s/Li8LJGEBMTCMA-qz49st/$body.$_path/] // TESTRESPONSE[s/Ly8LJGEBMTCMA-qz49st/$body.$_path/] - -For more information about these properties, see <>. 
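For example, retrieving every scheduled event from a hypothetical calendar (the `start`, `end`, `from`, and `size` parameters described above can be added to the body to window or page the results):

[source,js]
--------------------------------------------------
GET _ml/calendars/planned-outages/events
--------------------------------------------------
// CONSOLE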
diff --git a/docs/reference/ml/apis/get-calendar.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-calendar.asciidoc similarity index 71% rename from docs/reference/ml/apis/get-calendar.asciidoc rename to docs/reference/ml/anomaly-detection/apis/get-calendar.asciidoc index 3d55f825bdb86..5dfe3808a5667 100644 --- a/docs/reference/ml/apis/get-calendar.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-calendar.asciidoc @@ -28,21 +28,24 @@ Retrieves configuration information for calendars. You can get information for a single calendar or for all calendars by using `_all`. +For more information, see +{stack-ov}/ml-calendars.html[Calendars and scheduled events]. + [[ml-get-calendar-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the calendar. +``:: + (Required, string) Identifier for the calendar. [[ml-get-calendar-request-body]] ==== {api-request-body-title} -`page` (Optional):: +`page`:: `from`::: - (integer) Skips the specified number of calendars. + (Optional, integer) Skips the specified number of calendars. -`size` (Optional)::: - (integer) Specifies the maximum number of calendars to obtain. +`size`::: + (Optional, integer) Specifies the maximum number of calendars to obtain. [[ml-get-calendar-results]] ==== {api-response-body-title} @@ -50,8 +53,13 @@ You can get information for a single calendar or for all calendars by using The API returns the following information: `calendars`:: - (array) An array of calendar resources. - For more information, see <>. + (array) An array of calendar resources. A calendar resource has the following + properties: + `calendar_id`::: + (string) A numerical character string that uniquely identifies the calendar. + + `job_ids`::: + (array) An array of job identifiers. For example: `["total-requests"]`. [[ml-get-calendar-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/get-category.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-category.asciidoc similarity index 88% rename from docs/reference/ml/apis/get-category.asciidoc rename to docs/reference/ml/anomaly-detection/apis/get-category.asciidoc index 6301eaf13a538..d785d3217ec2c 100644 --- a/docs/reference/ml/apis/get-category.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-category.asciidoc @@ -34,21 +34,21 @@ For more information about categories, see [[ml-get-category-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the job. +``:: + (Required, string) Identifier for the job. -`` (Optional):: - (long) Identifier for the category. If you do not specify this parameter, - the API returns information about all categories in the job. +``:: + (Optional, long) Identifier for the category. If you do not specify this + parameter, the API returns information about all categories in the job. [[ml-get-category-request-body]] ==== {api-request-body-title} -`page` (Optional):: +`page`:: `from`::: - (integer) Skips the specified number of categories. + (Optional, integer) Skips the specified number of categories. `size`::: - (integer) Specifies the maximum number of categories to obtain. + (Optional, integer) Specifies the maximum number of categories to obtain. 
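For example, a sketch (job identifier hypothetical) that pages through categories one at a time using the `page` object described above:

[source,js]
--------------------------------------------------
GET _ml/anomaly_detectors/esxi_log/results/categories
{
  "page": {
    "from": 0,
    "size": 1
  }
}
--------------------------------------------------
// CONSOLE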
[[ml-get-category-results]] ==== {api-response-body-title} diff --git a/docs/reference/ml/apis/get-datafeed-stats.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-datafeed-stats.asciidoc similarity index 94% rename from docs/reference/ml/apis/get-datafeed-stats.asciidoc rename to docs/reference/ml/anomaly-detection/apis/get-datafeed-stats.asciidoc index 8a71351e76d67..95cb7777b9b08 100644 --- a/docs/reference/ml/apis/get-datafeed-stats.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-datafeed-stats.asciidoc @@ -44,9 +44,9 @@ IMPORTANT: This API returns a maximum of 10,000 {dfeeds}. [[ml-get-datafeed-stats-path-parms]] ==== {api-path-parms-title} -`` (Optional):: - (string) Identifier for the {dfeed}. It can be a {dfeed} identifier or a - wildcard expression. If you do not specify one of these options, the API +``:: + (Optional, string) Identifier for the {dfeed}. It can be a {dfeed} identifier + or a wildcard expression. If you do not specify one of these options, the API returns statistics for all {dfeeds}. [[ml-get-datafeed-stats-results]] diff --git a/docs/reference/ml/apis/get-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-datafeed.asciidoc similarity index 92% rename from docs/reference/ml/apis/get-datafeed.asciidoc rename to docs/reference/ml/anomaly-detection/apis/get-datafeed.asciidoc index abc79ae5c7d71..6e93c1f9ce182 100644 --- a/docs/reference/ml/apis/get-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-datafeed.asciidoc @@ -41,9 +41,9 @@ IMPORTANT: This API returns a maximum of 10,000 {dfeeds}. [[ml-get-datafeed-path-parms]] ==== {api-path-parms-title} -`` (Optional):: - (string) Identifier for the {dfeed}. It can be a {dfeed} identifier or a - wildcard expression. If you do not specify one of these options, the API +``:: + (Optional, string) Identifier for the {dfeed}. It can be a {dfeed} identifier + or a wildcard expression. If you do not specify one of these options, the API returns information about all {dfeeds}. [[ml-get-datafeed-results]] diff --git a/docs/reference/ml/apis/get-filter.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-filter.asciidoc similarity index 74% rename from docs/reference/ml/apis/get-filter.asciidoc rename to docs/reference/ml/anomaly-detection/apis/get-filter.asciidoc index ad5fee343f6d5..ba2036bea5dc9 100644 --- a/docs/reference/ml/apis/get-filter.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-filter.asciidoc @@ -31,17 +31,17 @@ You can get a single filter or all filters. For more information, see [[ml-get-filter-path-parms]] ==== {api-path-parms-title} -`` (Optional):: - (string) Identifier for the filter. +``:: + (Optional, string) Identifier for the filter. [[ml-get-filter-query-parms]] ==== {api-query-parms-title} -`from` (Optional)::: - (integer) Skips the specified number of filters. +`from`::: + (Optional, integer) Skips the specified number of filters. -`size` (Optional)::: - (integer) Specifies the maximum number of filters to obtain. +`size`::: + (Optional, integer) Specifies the maximum number of filters to obtain. [[ml-get-filter-results]] ==== {api-response-body-title} @@ -49,8 +49,16 @@ You can get a single filter or all filters. For more information, see The API returns the following information: `filters`:: - (array) An array of filter resources. - For more information, see <>. + (array) An array of filter resources. A filter resource has the following + properties: + `filter_id`::: + (string) A string that uniquely identifies the filter. 
+ + `description`::: + (string) A description of the filter. + + `items`::: + (array of strings) The items of the filter. [[ml-get-filter-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/get-influencer.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-influencer.asciidoc similarity index 71% rename from docs/reference/ml/apis/get-influencer.asciidoc rename to docs/reference/ml/anomaly-detection/apis/get-influencer.asciidoc index 8d7ca889a264f..21d4f1872d09e 100644 --- a/docs/reference/ml/apis/get-influencer.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-influencer.asciidoc @@ -26,37 +26,38 @@ privileges. See {stack-ov}/security-privileges.html[Security privileges] and [[ml-get-influencer-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the job. +``:: + (Required, string) Identifier for the job. [[ml-get-influencer-request-body]] ==== {api-request-body-title} -`desc` (Optional):: - (boolean) If true, the results are sorted in descending order. +`desc`:: + (Optional, boolean) If true, the results are sorted in descending order. -`end` (Optional):: - (string) Returns influencers with timestamps earlier than this time. +`end`:: + (Optional, string) Returns influencers with timestamps earlier than this time. -`exclude_interim` (Optional):: - (boolean) If true, the output excludes interim results. - By default, interim results are included. +`exclude_interim`:: + (Optional, boolean) If true, the output excludes interim results. By default, + interim results are included. -`influencer_score` (Optional):: - (double) Returns influencers with anomaly scores greater or equal than this value. +`influencer_score`:: + (Optional, double) Returns influencers with anomaly scores greater than or + equal to this value. -`page` (Optional):: +`page`:: `from`::: - (integer) Skips the specified number of influencers. + (Optional, integer) Skips the specified number of influencers. `size`::: - (integer) Specifies the maximum number of influencers to obtain. + (Optional, integer) Specifies the maximum number of influencers to obtain. -`sort` (Optional):: - (string) Specifies the sort field for the requested influencers. - By default the influencers are sorted by the `influencer_score` value. +`sort`:: + (Optional, string) Specifies the sort field for the requested influencers. By + default, the influencers are sorted by the `influencer_score` value. -`start` (Optional):: - (string) Returns influencers with timestamps after this time. +`start`:: + (Optional, string) Returns influencers with timestamps after this time. [[ml-get-influencer-results]] ==== {api-response-body-title} diff --git a/docs/reference/ml/apis/get-job-stats.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-job-stats.asciidoc similarity index 93% rename from docs/reference/ml/apis/get-job-stats.asciidoc rename to docs/reference/ml/anomaly-detection/apis/get-job-stats.asciidoc index 8a705d7ff9ed1..22bb053e1934a 100644 --- a/docs/reference/ml/apis/get-job-stats.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-job-stats.asciidoc @@ -39,10 +39,10 @@ IMPORTANT: This API returns a maximum of 10,000 jobs. [[ml-get-job-stats-path-parms]] ==== {api-path-parms-title} -`` (Optional):: - (string) An identifier for the job. It can be a job identifier, a group name, - or a wildcard expression. If you do not specify one of these options, the API - returns statistics for all jobs. +``:: + (Optional, string) An identifier for the job. 
It can be a job identifier, a + group name, or a wildcard expression. If you do not specify one of these + options, the API returns statistics for all jobs. [[ml-get-job-stats-results]] ==== {api-response-body-title} diff --git a/docs/reference/ml/apis/get-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-job.asciidoc similarity index 92% rename from docs/reference/ml/apis/get-job.asciidoc rename to docs/reference/ml/anomaly-detection/apis/get-job.asciidoc index 176ca09fc56c5..0a83513dbf5cd 100644 --- a/docs/reference/ml/apis/get-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-job.asciidoc @@ -39,10 +39,10 @@ IMPORTANT: This API returns a maximum of 10,000 jobs. [[ml-get-job-path-parms]] ==== {api-path-parms-title} -` (Optional)`:: - (string) Identifier for the job. It can be a job identifier, a group name, - or a wildcard expression. If you do not specify one of these options, the API - returns information for all jobs. +``:: + (Optional, string) Identifier for the job. It can be a job identifier, a group + name, or a wildcard expression. If you do not specify one of these options, + the API returns information for all jobs. [[ml-get-job-results]] ==== {api-response-body-title} diff --git a/docs/reference/ml/apis/get-ml-info.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-ml-info.asciidoc similarity index 100% rename from docs/reference/ml/apis/get-ml-info.asciidoc rename to docs/reference/ml/anomaly-detection/apis/get-ml-info.asciidoc diff --git a/docs/reference/ml/apis/get-overall-buckets.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-overall-buckets.asciidoc similarity index 82% rename from docs/reference/ml/apis/get-overall-buckets.asciidoc rename to docs/reference/ml/anomaly-detection/apis/get-overall-buckets.asciidoc index 4d8287f9a54f7..6d7e702408bb6 100644 --- a/docs/reference/ml/apis/get-overall-buckets.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-overall-buckets.asciidoc @@ -56,39 +56,39 @@ overall buckets with a span equal to the largest job's `bucket_span`. [[ml-get-overall-buckets-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the job. It can be a job identifier, a group name, a - comma-separated list of jobs or groups, or a wildcard expression. +``:: + (Required, string) Identifier for the job. It can be a job identifier, a group + name, a comma-separated list of jobs or groups, or a wildcard expression. [[ml-get-overall-buckets-request-body]] ==== {api-request-body-title} -`allow_no_jobs` (Optional):: - (boolean) If `false` and the `job_id` does not match any job an error will - be returned. The default value is `true`. +`allow_no_jobs`:: + (Optional, boolean) If `false` and the `job_id` does not match any job, an + error occurs. The default value is `true`. -`bucket_span` (Optional):: - (string) The span of the overall buckets. Must be greater or equal +`bucket_span`:: + (Optional, string) The span of the overall buckets. Must be greater than or equal to the largest job's `bucket_span`. Defaults to the largest job's `bucket_span`. -`end` (Optional):: - (string) Returns overall buckets with timestamps earlier than this time. +`end`:: + (Optional, string) Returns overall buckets with timestamps earlier than this + time. -`exclude_interim` (Optional):: - (boolean) If `true`, the output excludes interim overall buckets. - Overall buckets are interim if any of the job buckets within - the overall bucket interval are interim. - By default, interim results are included. 
+`exclude_interim`:: + (Optional, boolean) If `true`, the output excludes interim overall buckets. + Overall buckets are interim if any of the job buckets within the overall + bucket interval are interim. By default, interim results are included. -`overall_score` (Optional):: - (double) Returns overall buckets with overall scores greater or equal than - this value. +`overall_score`:: + (Optional, double) Returns overall buckets with overall scores greater than or + equal to this value. -`start` (Optional):: - (string) Returns overall buckets with timestamps after this time. +`start`:: + (Optional, string) Returns overall buckets with timestamps after this time. -`top_n` (Optional):: - (integer) The number of top job bucket scores to be used in the +`top_n`:: + (Optional, integer) The number of top job bucket scores to be used in the `overall_score` calculation. The default value is `1`. [[ml-get-overall-buckets-results]] diff --git a/docs/reference/ml/apis/get-record.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-record.asciidoc similarity index 71% rename from docs/reference/ml/apis/get-record.asciidoc rename to docs/reference/ml/anomaly-detection/apis/get-record.asciidoc index 0acc3e0e49fce..5dfba1bda1722 100644 --- a/docs/reference/ml/apis/get-record.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-record.asciidoc @@ -26,37 +26,38 @@ privileges. See {stack-ov}/security-privileges.html[Security privileges] and [[ml-get-record-path-parms]] ==== {api-path-parms-title} -`job_id` (Required):: - (string) Identifier for the job. +``:: + (Required, string) Identifier for the job. [[ml-get-record-request-body]] ==== {api-request-body-title} -`desc` (Optional):: - (boolean) If true, the results are sorted in descending order. +`desc`:: + (Optional, boolean) If true, the results are sorted in descending order. -`end` (Optional):: - (string) Returns records with timestamps earlier than this time. +`end`:: + (Optional, string) Returns records with timestamps earlier than this time. -`exclude_interim` (Optional):: - (boolean) If true, the output excludes interim results. - By default, interim results are included. +`exclude_interim`:: + (Optional, boolean) If true, the output excludes interim results. By default, + interim results are included. -`page` (Optional):: +`page`:: `from`::: - (integer) Skips the specified number of records. + (Optional, integer) Skips the specified number of records. `size`::: - (integer) Specifies the maximum number of records to obtain. + (Optional, integer) Specifies the maximum number of records to obtain. -`record_score` (Optional):: - (double) Returns records with anomaly scores greater or equal than this value. +`record_score`:: + (Optional, double) Returns records with anomaly scores greater than or equal to + this value. -`sort` (Optional):: - (string) Specifies the sort field for the requested records. - By default, the records are sorted by the `anomaly_score` value. +`sort`:: + (Optional, string) Specifies the sort field for the requested records. By + default, the records are sorted by the `anomaly_score` value. -`start` (Optional):: - (string) Returns records with timestamps after this time. 
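To make the interplay of these parameters concrete, a minimal sketch of a records query that sorts by `record_score`, descending, and drops low-scoring and interim results; the job name and threshold are invented for illustration:

[source,js]
--------------------------------------------------
GET _ml/anomaly_detectors/total-requests/results/records
{
  "sort": "record_score",
  "desc": true,
  "record_score": 75.0,
  "exclude_interim": true
}
--------------------------------------------------
// CONSOLE
// TEST[skip:illustrative sketch, job name is hypothetical]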
[[ml-get-record-results]] ==== {api-response-body-title} diff --git a/docs/reference/ml/apis/get-snapshot.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-snapshot.asciidoc similarity index 71% rename from docs/reference/ml/apis/get-snapshot.asciidoc rename to docs/reference/ml/anomaly-detection/apis/get-snapshot.asciidoc index ea1b15df33f33..d829bfb579c4f 100644 --- a/docs/reference/ml/apis/get-snapshot.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-snapshot.asciidoc @@ -25,34 +25,34 @@ Retrieves information about model snapshots. [[ml-get-snapshot-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the job. +``:: + (Required, string) Identifier for the job. -`` (Optional):: - (string) Identifier for the model snapshot. If you do not specify this - optional parameter, the API returns information about all model snapshots. +``:: + (Optional, string) Identifier for the model snapshot. If you do not specify + this optional parameter, the API returns information about all model snapshots. [[ml-get-snapshot-request-body]] ==== {api-request-body-title} -`desc` (Optional):: - (boolean) If true, the results are sorted in descending order. +`desc`:: + (Optional, boolean) If true, the results are sorted in descending order. -`end` (Optional):: - (date) Returns snapshots with timestamps earlier than this time. +`end`:: + (Optional, date) Returns snapshots with timestamps earlier than this time. -`from` (Optional):: - (integer) Skips the specified number of snapshots. +`from`:: + (Optional, integer) Skips the specified number of snapshots. -`size` (Optional):: - (integer) Specifies the maximum number of snapshots to obtain. +`size`:: + (Optional, integer) Specifies the maximum number of snapshots to obtain. -`sort` (Optional):: - (string) Specifies the sort field for the requested snapshots. - By default, the snapshots are sorted by their timestamp. +`sort`:: + (Optional, string) Specifies the sort field for the requested snapshots. By + default, the snapshots are sorted by their timestamp. -`start` (Optional):: - (string) Returns snapshots with timestamps after this time. +`start`:: + (Optional, string) Returns snapshots with timestamps after this time. 
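Since the example later in this file issues a bare `GET`, here is a minimal sketch of how the `sort`, `desc`, and paging parameters above combine; the `farequote` job name is reused from that example, and the values are illustrative:

[source,js]
--------------------------------------------------
GET _ml/anomaly_detectors/farequote/model_snapshots
{
  "sort": "timestamp",
  "desc": true,
  "from": 0,
  "size": 10
}
--------------------------------------------------
// CONSOLE
// TEST[skip:illustrative sketch]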
[[ml-get-snapshot-results]] ==== {api-response-body-title} @@ -66,9 +66,6 @@ The API returns the following information: [[ml-get-snapshot-example]] ==== {api-examples-title} -The following example gets model snapshot information for the -`it_ops_new_logs` job: - [source,js] -------------------------------------------------- GET _ml/anomaly_detectors/farequote/model_snapshots diff --git a/docs/reference/ml/apis/jobcounts.asciidoc b/docs/reference/ml/anomaly-detection/apis/jobcounts.asciidoc similarity index 100% rename from docs/reference/ml/apis/jobcounts.asciidoc rename to docs/reference/ml/anomaly-detection/apis/jobcounts.asciidoc diff --git a/docs/reference/ml/apis/jobresource.asciidoc b/docs/reference/ml/anomaly-detection/apis/jobresource.asciidoc similarity index 99% rename from docs/reference/ml/apis/jobresource.asciidoc rename to docs/reference/ml/anomaly-detection/apis/jobresource.asciidoc index 751bf33788121..e6e243120cf19 100644 --- a/docs/reference/ml/apis/jobresource.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/jobresource.asciidoc @@ -438,7 +438,7 @@ A custom rule has the following properties: To add a scope for a field, add the field name as a key in the scope object and set its value to an object with the following properties: `filter_id`::: - (string) The id of the <> to be used. + (string) The id of the filter to be used. `filter_type`::: (string) Either `include` (the rule applies for values in the filter) diff --git a/docs/reference/ml/apis/ml-api.asciidoc b/docs/reference/ml/anomaly-detection/apis/ml-api.asciidoc similarity index 83% rename from docs/reference/ml/apis/ml-api.asciidoc rename to docs/reference/ml/anomaly-detection/apis/ml-api.asciidoc index f4da1143735bd..2014d05812595 100644 --- a/docs/reference/ml/apis/ml-api.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/ml-api.asciidoc @@ -1,12 +1,14 @@ [role="xpack"] [testenv="platinum"] [[ml-apis]] -== Machine learning APIs +== {ml-cap} {anomaly-detect} APIs -You can use the following APIs to perform {ml} activities. -See <> for the resource definitions used by the +You can use the following APIs to perform {ml} {anomaly-detect} activities. See +<> for the resource definitions used by the machine learning APIs and in advanced job configuration options in Kibana. +See also <>. + [discrete] [[ml-api-anomaly-job-endpoint]] === {anomaly-jobs-cap} @@ -48,23 +50,11 @@ machine learning APIs and in advanced job configuration options in Kibana. * <> * <> -[discrete] -[[ml-api-dfanalytics-endpoint]] -=== {dfanalytics-cap} APIs - -* <> or -<> -* <> or -<> -* <> or <> -* <> - - [discrete] [[ml-api-job-endpoint]] === Jobs -See <> and <>. +See <>. 
[discrete] [[ml-api-snapshot-endpoint]] @@ -120,7 +110,6 @@ include::put-job.asciidoc[] include::put-calendar.asciidoc[] include::put-datafeed.asciidoc[] include::put-filter.asciidoc[] -include::put-dfanalytics.asciidoc[] //DELETE include::delete-calendar.asciidoc[] include::delete-datafeed.asciidoc[] @@ -131,9 +120,6 @@ include::delete-job.asciidoc[] include::delete-calendar-job.asciidoc[] include::delete-snapshot.asciidoc[] include::delete-expired-data.asciidoc[] -include::delete-dfanalytics.asciidoc[] -//EVALUATE -include::evaluate-dfanalytics.asciidoc[] //FIND include::find-file-structure.asciidoc[] //FLUSH @@ -141,9 +127,8 @@ include::flush-job.asciidoc[] //FORECAST include::forecast.asciidoc[] //GET -include::get-calendar.asciidoc[] include::get-bucket.asciidoc[] -include::get-overall-buckets.asciidoc[] +include::get-calendar.asciidoc[] include::get-category.asciidoc[] include::get-datafeed.asciidoc[] include::get-datafeed-stats.asciidoc[] @@ -152,11 +137,10 @@ include::get-job.asciidoc[] include::get-job-stats.asciidoc[] include::get-ml-info.asciidoc[] include::get-snapshot.asciidoc[] +include::get-overall-buckets.asciidoc[] include::get-calendar-event.asciidoc[] include::get-filter.asciidoc[] include::get-record.asciidoc[] -include::get-dfanalytics.asciidoc[] -include::get-dfanalytics-stats.asciidoc[] //OPEN include::open-job.asciidoc[] //POST @@ -168,9 +152,7 @@ include::revert-snapshot.asciidoc[] //SET/START/STOP include::set-upgrade-mode.asciidoc[] include::start-datafeed.asciidoc[] -include::start-dfanalytics.asciidoc[] include::stop-datafeed.asciidoc[] -include::stop-dfanalytics.asciidoc[] //UPDATE include::update-datafeed.asciidoc[] include::update-filter.asciidoc[] diff --git a/docs/reference/ml/apis/open-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/open-job.asciidoc similarity index 89% rename from docs/reference/ml/apis/open-job.asciidoc rename to docs/reference/ml/anomaly-detection/apis/open-job.asciidoc index 84000cb89b0de..23ccd1586ed75 100644 --- a/docs/reference/ml/apis/open-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/open-job.asciidoc @@ -7,8 +7,6 @@ ++++ Opens one or more jobs. -A job must be opened in order for it to be ready to receive and analyze data. -A job can be opened and closed multiple times throughout its lifecycle. [[ml-open-job-request]] ==== {api-request-title} @@ -25,6 +23,9 @@ A job can be opened and closed multiple times throughout its lifecycle. [[ml-open-job-desc]] ==== {api-description-title} +A job must be opened in order for it to be ready to receive and analyze data. +A job can be opened and closed multiple times throughout its lifecycle. + When you open a new job, it starts with an empty model. When you open an existing job, the most recent model state is automatically @@ -34,15 +35,15 @@ data is received. [[ml-open-job-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the job +``:: + (Required, string) Identifier for the job [[ml-open-job-request-body]] ==== {api-request-body-title} -`timeout` (Optional):: - (time) Controls the time to wait until a job has opened. - The default value is 30 minutes. +`timeout`:: + (Optional, time) Controls the time to wait until a job has opened. The default + value is 30 minutes. 
[[ml-open-job-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/post-calendar-event.asciidoc b/docs/reference/ml/anomaly-detection/apis/post-calendar-event.asciidoc similarity index 72% rename from docs/reference/ml/apis/post-calendar-event.asciidoc rename to docs/reference/ml/anomaly-detection/apis/post-calendar-event.asciidoc index 88d771f3b7f18..b247297456a30 100644 --- a/docs/reference/ml/apis/post-calendar-event.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/post-calendar-event.asciidoc @@ -29,16 +29,31 @@ of which must have a start time, end time, and description. [[ml-post-calendar-event-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the calendar. +``:: + (Required, string) Identifier for the calendar. [[ml-post-calendar-event-request-body]] ==== {api-request-body-title} -`events` (Required):: - (array) A list of one of more scheduled events. The event's start and end - times may be specified as integer milliseconds since the epoch or as a string - in ISO 8601 format. See <>. +`events`:: + (Required, array) A list of one or more scheduled events. The event's start + and end times may be specified as integer milliseconds since the epoch or as a + string in ISO 8601 format. An event resource has the following properties: + + `calendar_id`::: + (Optional, string) An identifier for the calendar that contains the scheduled + event. + + `description`::: + (Optional, string) A description of the scheduled event. + + `end_time`::: + (Required, date) The timestamp for the end of the scheduled event + in milliseconds since the epoch or ISO 8601 format. + + `start_time`::: + (Required, date) The timestamp for the beginning of the scheduled event + in milliseconds since the epoch or ISO 8601 format. [[ml-post-calendar-event-example]] ==== {api-examples-title} @@ -87,6 +102,3 @@ The API returns the following results: } ---- // TESTRESPONSE - -For more information about these properties, see -<>. diff --git a/docs/reference/ml/apis/post-data.asciidoc b/docs/reference/ml/anomaly-detection/apis/post-data.asciidoc similarity index 93% rename from docs/reference/ml/apis/post-data.asciidoc rename to docs/reference/ml/anomaly-detection/apis/post-data.asciidoc index 3c2d0e49fde93..0f092ebeddc9f 100644 --- a/docs/reference/ml/apis/post-data.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/post-data.asciidoc @@ -52,17 +52,17 @@ or a comma-separated list. [[ml-post-data-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the job. +``:: + (Required, string) Identifier for the job. [[ml-post-data-query-parms]] ==== {api-query-parms-title} -`reset_start` (Optional):: - (string) Specifies the start of the bucket resetting range. +`reset_start`:: + (Optional, string) Specifies the start of the bucket resetting range. -`reset_end` (Optional):: - (string) Specifies the end of the bucket resetting range. +`reset_end`:: + (Optional, string) Specifies the end of the bucket resetting range. 
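A minimal sketch of how the `reset_start` and `reset_end` query parameters attach to a data upload; the job name, timestamps, and document fields are invented for illustration, and real payloads must match the job's `data_description`:

[source,js]
--------------------------------------------------
POST _ml/anomaly_detectors/total-requests/_data?reset_start=1454944100000&reset_end=1454947700000
{"time": 1454944200000, "total": 42}
{"time": 1454944260000, "total": 57}
--------------------------------------------------
// CONSOLE
// TEST[skip:illustrative sketch, job name and fields are hypothetical]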
[[ml-post-data-request-body]] ==== {api-request-body-title} diff --git a/docs/reference/ml/apis/preview-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/preview-datafeed.asciidoc similarity index 97% rename from docs/reference/ml/apis/preview-datafeed.asciidoc rename to docs/reference/ml/anomaly-detection/apis/preview-datafeed.asciidoc index 4ca3ebcd10e40..dcf96d2297bb3 100644 --- a/docs/reference/ml/apis/preview-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/preview-datafeed.asciidoc @@ -40,8 +40,8 @@ it to ensure it is returning the expected data. [[ml-preview-datafeed-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the {dfeed}. +``:: + (Required, string) Identifier for the {dfeed}. [[ml-preview-datafeed-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/put-calendar-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-calendar-job.asciidoc similarity index 76% rename from docs/reference/ml/apis/put-calendar-job.asciidoc rename to docs/reference/ml/anomaly-detection/apis/put-calendar-job.asciidoc index 0a1ee2fcc6de0..d693543931013 100644 --- a/docs/reference/ml/apis/put-calendar-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-calendar-job.asciidoc @@ -23,19 +23,16 @@ Adds a job to a calendar. [[ml-put-calendar-job-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the calendar. +``:: + (Required, string) Identifier for the calendar. -`` (Required):: - (string) An identifier for the job. It can be a job identifier, a group name, - or a comma-separated list of jobs or groups. +``:: + (Required, string) An identifier for the job. It can be a job identifier, a + group name, or a comma-separated list of jobs or groups. [[ml-put-calendar-job-example]] ==== {api-examples-title} -The following example associates the `planned-outages` calendar with the -`total-requests` job: - [source,js] -------------------------------------------------- PUT _ml/calendars/planned-outages/jobs/total-requests diff --git a/docs/reference/ml/apis/put-calendar.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-calendar.asciidoc similarity index 80% rename from docs/reference/ml/apis/put-calendar.asciidoc rename to docs/reference/ml/anomaly-detection/apis/put-calendar.asciidoc index f98dd541d6753..cefcb4e041189 100644 --- a/docs/reference/ml/apis/put-calendar.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-calendar.asciidoc @@ -24,25 +24,23 @@ Instantiates a calendar. ==== {api-description-title} For more information, see -{stack-ov}/ml-calendars.html[Calendars and Scheduled Events]. +{stack-ov}/ml-calendars.html[Calendars and scheduled events]. [[ml-put-calendar-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the calendar. +``:: + (Required, string) Identifier for the calendar. [[ml-put-calendar-request-body]] ==== {api-request-body-title} -`description` (Optional):: - (string) A description of the calendar. +`description`:: + (Optional, string) A description of the calendar. 
[[ml-put-calendar-example]] ==== {api-examples-title} -The following example creates the `planned-outages` calendar: - [source,js] -------------------------------------------------- PUT _ml/calendars/planned-outages diff --git a/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc new file mode 100644 index 0000000000000..fa99d311ff015 --- /dev/null +++ b/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc @@ -0,0 +1,144 @@ +[role="xpack"] +[testenv="platinum"] +[[ml-put-datafeed]] +=== Create {dfeeds} API + +[subs="attributes"] +++++ +Create {dfeeds} +++++ + +Instantiates a {dfeed}. + +[[ml-put-datafeed-request]] +==== {api-request-title} + +`PUT _ml/datafeeds/` + +[[ml-put-datafeed-prereqs]] +==== {api-prereq-title} + +* You must create a job before you create a {dfeed}. +* If {es} {security-features} are enabled, you must have `manage_ml` or `manage` +cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + +[[ml-put-datafeed-desc]] +==== {api-description-title} + +You can associate only one {dfeed} with each job. + +[IMPORTANT] +==== +* You must use {kib} or this API to create a {dfeed}. Do not put a +{dfeed} directly to the `.ml-config` index using the {es} index API. If {es} +{security-features} are enabled, do not give users `write` privileges on the +`.ml-config` index. +* When {es} {security-features} are enabled, your {dfeed} remembers which roles +the user who created it had at the time of creation and runs the query using +those same roles. +==== + +[[ml-put-datafeed-path-parms]] +==== {api-path-parms-title} + +``:: + (Required, string) A numerical character string that uniquely identifies the + {dfeed}. This identifier can contain lowercase alphanumeric characters (a-z + and 0-9), hyphens, and underscores. It must start and end with alphanumeric + characters. + +[[ml-put-datafeed-request-body]] +==== {api-request-body-title} + +`aggregations`:: + (Optional, object) If set, the {dfeed} performs aggregation searches. For more + information, see <>. + +`chunking_config`:: + (Optional, object) Specifies how data searches are split into time chunks. See + <>. + +`delayed_data_check_config`:: + (Optional, object) Specifies whether the data feed checks for missing data and + the size of the window. See <>. + +`frequency`:: + (Optional, time units) The interval at which scheduled queries are made while + the {dfeed} runs in real time. The default value is either the bucket span for + short bucket spans, or, for longer bucket spans, a sensible fraction of the + bucket span. For example: `150s`. + +`indices`:: + (Required, array) An array of index names. Wildcards are supported. For + example: `["it_ops_metrics", "server*"]`. + +`job_id`:: + (Required, string) A numerical character string that uniquely identifies the + job. + +`query`:: + (Optional, object) The {es} query domain-specific language (DSL). This value + corresponds to the query object in an {es} search POST body. All the options + that are supported by {es} can be used, as this object is passed verbatim to + {es}. By default, this property has the following value: + `{"match_all": {"boost": 1}}`. + +`query_delay`:: + (Optional, time units) The number of seconds behind real time that data is + queried. For example, if data from 10:04 a.m. might not be searchable in {es} + until 10:06 a.m., set this property to 120 seconds. The default value is `60s`. 
+ + `script_fields`:: + (Optional, object) Specifies scripts that evaluate custom expressions and + return script fields to the {dfeed}. The + <> in a job can contain + functions that use these script fields. For more information, see + <>. + +`scroll_size`:: + (Optional, unsigned integer) The `size` parameter that is used in {es} + searches. The default value is `1000`. + +For more information about these properties, +see <>. + +[[ml-put-datafeed-example]] +==== {api-examples-title} + +The following example creates the `datafeed-total-requests` {dfeed}: + +[source,js] +-------------------------------------------------- +PUT _ml/datafeeds/datafeed-total-requests +{ + "job_id": "total-requests", + "indices": ["server-metrics"] +} +-------------------------------------------------- +// CONSOLE +// TEST[skip:setup:server_metrics_job] + +When the {dfeed} is created, you receive the following results: +[source,js] +---- +{ + "datafeed_id": "datafeed-total-requests", + "job_id": "total-requests", + "query_delay": "83474ms", + "indices": [ + "server-metrics" + ], + "query": { + "match_all": { + "boost": 1.0 + } + }, + "scroll_size": 1000, + "chunking_config": { + "mode": "auto" + } +} +---- +// TESTRESPONSE[s/"query_delay": "83474ms"/"query_delay": $body.query_delay/] +// TESTRESPONSE[s/"query.boost": "1.0"/"query.boost": $body.query.boost/] diff --git a/docs/reference/ml/apis/put-filter.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-filter.asciidoc similarity index 82% rename from docs/reference/ml/apis/put-filter.asciidoc rename to docs/reference/ml/anomaly-detection/apis/put-filter.asciidoc index ad0d6d34ea81d..da080ef700f8b 100644 --- a/docs/reference/ml/apis/put-filter.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-filter.asciidoc @@ -30,20 +30,19 @@ the `custom_rules` property of <` (Required):: - (string) Identifier for the filter. +``:: + (Required, string) Identifier for the filter. [[ml-put-filter-request-body]] ==== {api-request-body-title} -`description` (Optional):: - (string) A description of the filter. +`description`:: + (Optional, string) A description of the filter. -`items` (Required):: - (array of strings) The items of the filter. - A wildcard `*` can be used at the beginning - or the end of an item. Up to 10000 items - are allowed in each filter. +`items`:: + (Required, array of strings) The items of the filter. A wildcard `*` can be + used at the beginning or the end of an item. Up to 10000 items are allowed in + each filter. [[ml-put-filter-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/put-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc similarity index 52% rename from docs/reference/ml/apis/put-job.asciidoc rename to docs/reference/ml/anomaly-detection/apis/put-job.asciidoc index ea92bf94d695c..dd32bb108d784 100644 --- a/docs/reference/ml/apis/put-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc @@ -33,61 +33,65 @@ a job directly to the `.ml-config` index using the {es} index API. If {es} [[ml-put-job-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the job. This identifier can contain lowercase - alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must - start and end with alphanumeric characters. +``:: + (Required, string) Identifier for the job. This identifier can contain + lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It + must start and end with alphanumeric characters. 
[[ml-put-job-request-body]] ==== {api-request-body-title} -`analysis_config` (Required):: - (object) The analysis configuration, which specifies how to analyze the data. - See <>. +`analysis_config`:: + (Required, object) The analysis configuration, which specifies how to analyze + the data. See <>. -`analysis_limits` (Optional):: - (object) Specifies runtime limits for the job. See +`analysis_limits`:: + (Optional, object) Specifies runtime limits for the job. See <>. -`background_persist_interval` (Optional):: - (time units) Advanced configuration option. The time between each periodic - persistence of the model. See <>. +`background_persist_interval`:: + (Optional, time units) Advanced configuration option. The time between each + periodic persistence of the model. See <>. -`custom_settings` (Optional):: - (object) Advanced configuration option. Contains custom meta data about the - job. See <>. +`custom_settings`:: + (Optional, object) Advanced configuration option. Contains custom metadata + about the job. See <>. -`data_description` (Required):: - (object) Describes the format of the input data. This object is required, but - it can be empty (`{}`). See <>. +`data_description`:: + (Required, object) Describes the format of the input data. This object is + required, but it can be empty (`{}`). See + <>. -`description` (Optional):: - (string) A description of the job. +`description`:: + (Optional, string) A description of the job. -`groups` (Optional):: - (array of strings) A list of job groups. See <>. +`groups`:: + (Optional, array of strings) A list of job groups. See <>. -`model_plot_config` (Optional):: - (object) Advanced configuration option. Specifies to store model information - along with the results. This adds overhead to the performance of the system - and is not feasible for jobs with many entities, see <>. +`model_plot_config`:: + (Optional, object) Advanced configuration option. Specifies to store model + information along with the results. This adds overhead to the performance of + the system and is not feasible for jobs with many entities. See + <>. -`model_snapshot_retention_days` (Optional):: - (long) The time in days that model snapshots are retained for the job. - Older snapshots are deleted. The default value is `1`, which means snapshots - are retained for one day (twenty-four hours). +`model_snapshot_retention_days`:: + (Optional, long) The time in days that model snapshots are retained for the + job. Older snapshots are deleted. The default value is `1`, which means + snapshots are retained for one day (twenty-four hours). -`renormalization_window_days` (Optional):: - (long) Advanced configuration option. The period over which adjustments to the - score are applied, as new data is seen. See <>. +`renormalization_window_days`:: + (Optional, long) Advanced configuration option. The period over which + adjustments to the score are applied, as new data is seen. See + <>. -`results_index_name` (Optional):: - (string) A text string that affects the name of the {ml} results index. The - default value is `shared`, which generates an index named `.ml-anomalies-shared`. +`results_index_name`:: + (Optional, string) A text string that affects the name of the {ml} results + index. The default value is `shared`, which generates an index named + `.ml-anomalies-shared`. -`results_retention_days` (Optional):: - (long) Advanced configuration option. The number of days for which job results - are retained. See <>. 
+`results_retention_days`:: + (Optional, long) Advanced configuration option. The number of days for which + job results are retained. See <>. [[ml-put-job-example]] ==== {api-examples-title} @@ -116,7 +120,6 @@ PUT _ml/anomaly_detectors/total-requests } -------------------------------------------------- // CONSOLE -// TEST[skip: https://github.com/elastic/elasticsearch/issues/43271] When the job is created, you receive the following results: [source,js] diff --git a/docs/reference/ml/apis/resultsresource.asciidoc b/docs/reference/ml/anomaly-detection/apis/resultsresource.asciidoc similarity index 100% rename from docs/reference/ml/apis/resultsresource.asciidoc rename to docs/reference/ml/anomaly-detection/apis/resultsresource.asciidoc diff --git a/docs/reference/ml/apis/revert-snapshot.asciidoc b/docs/reference/ml/anomaly-detection/apis/revert-snapshot.asciidoc similarity index 87% rename from docs/reference/ml/apis/revert-snapshot.asciidoc rename to docs/reference/ml/anomaly-detection/apis/revert-snapshot.asciidoc index 86d3d4c14a93c..3d732fdf367be 100644 --- a/docs/reference/ml/apis/revert-snapshot.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/revert-snapshot.asciidoc @@ -16,6 +16,7 @@ Reverts to a specific snapshot. [[ml-revert-snapshot-prereqs]] ==== {api-prereq-title} +* Before you revert to a saved snapshot, you must close the job. * If the {es} {security-features} are enabled, you must have `manage_ml` or `manage` cluster privileges to use this API. See {stack-ov}/security-privileges.html[Security privileges]. @@ -31,23 +32,21 @@ then it might be appropriate to reset the model state to a time before this event. For example, you might consider reverting to a saved snapshot after Black Friday or a critical system failure. -IMPORTANT: Before you revert to a saved snapshot, you must close the job. - [[ml-revert-snapshot-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the job. +``:: + (Required, string) Identifier for the job. -`` (Required):: - (string) Identifier for the model snapshot. +``:: + (Required, string) Identifier for the model snapshot. [[ml-revert-snapshot-request-body]] ==== {api-request-body-title} -`delete_intervening_results` (Optional):: - (boolean) If true, deletes the results in the time period between the - latest results and the time of the reverted snapshot. It also resets the +`delete_intervening_results`:: + (Optional, boolean) If true, deletes the results in the time period between + the latest results and the time of the reverted snapshot. It also resets the model to accept records for this time period. The default value is false. NOTE: If you choose not to delete intervening results when reverting a snapshot, diff --git a/docs/reference/ml/apis/set-upgrade-mode.asciidoc b/docs/reference/ml/anomaly-detection/apis/set-upgrade-mode.asciidoc similarity index 93% rename from docs/reference/ml/apis/set-upgrade-mode.asciidoc rename to docs/reference/ml/anomaly-detection/apis/set-upgrade-mode.asciidoc index 6a00656430c66..763ebb92d8e37 100644 --- a/docs/reference/ml/apis/set-upgrade-mode.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/set-upgrade-mode.asciidoc @@ -61,12 +61,13 @@ IMPORTANT: No new {ml} jobs can be opened while the `upgrade_mode` setting is [[ml-set-upgrade-mode-query-parms]] ==== {api-query-parms-title} -`enabled` (Optional):: - (boolean) When `true`, this enables `upgrade_mode`. Defaults to `false` +`enabled`:: + (Optional, boolean) When `true`, this enables `upgrade_mode`. 
Defaults to + `false`. -`timeout` (Optional):: - (time) The time to wait for the request to be completed. - The default value is 30 seconds. +`timeout`:: + (Optional, time) The time to wait for the request to be completed. The default + value is 30 seconds. [[ml-set-upgrade-mode-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/snapshotresource.asciidoc b/docs/reference/ml/anomaly-detection/apis/snapshotresource.asciidoc similarity index 100% rename from docs/reference/ml/apis/snapshotresource.asciidoc rename to docs/reference/ml/anomaly-detection/apis/snapshotresource.asciidoc diff --git a/docs/reference/ml/apis/start-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/start-datafeed.asciidoc similarity index 86% rename from docs/reference/ml/apis/start-datafeed.asciidoc rename to docs/reference/ml/anomaly-detection/apis/start-datafeed.asciidoc index 05cf0766e9522..ceaa95585620d 100644 --- a/docs/reference/ml/apis/start-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/start-datafeed.asciidoc @@ -9,8 +9,6 @@ ++++ Starts one or more {dfeeds}. -A {dfeed} must be started in order to retrieve data from {es}. -A {dfeed} can be started and stopped multiple times throughout its lifecycle. [[ml-start-datafeed-request]] ==== {api-request-title} @@ -20,6 +18,8 @@ A {dfeed} can be started and stopped multiple times throughout its lifecycle. [[ml-start-datafeed-prereqs]] ==== {api-prereq-title} +* Before you can start a {dfeed}, the job must be open. Otherwise, an error +occurs. * If {es} {security-features} are enabled, you must have `manage_ml` or `manage` cluster privileges to use this API. See {stack-ov}/security-privileges.html[Security privileges]. @@ -27,8 +27,8 @@ cluster privileges to use this API. See [[ml-start-datafeed-desc]] ==== {api-description-title} -NOTE: Before you can start a {dfeed}, the job must be open. Otherwise, an error -occurs. +A {dfeed} must be started in order to retrieve data from {es}. +A {dfeed} can be started and stopped multiple times throughout its lifecycle. When you start a {dfeed}, you can specify a start time. This enables you to include a training period, providing you have this data available in {es}. @@ -72,22 +72,22 @@ creation/update and runs the query using those same roles. [[ml-start-datafeed-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the {dfeed}. +``:: + (Required, string) Identifier for the {dfeed}. [[ml-start-datafeed-request-body]] ==== {api-request-body-title} -`end` (Optional):: - (string) The time that the {dfeed} should end. This value is exclusive. - The default value is an empty string. +`end`:: + (Optional, string) The time that the {dfeed} should end. This value is + exclusive. The default value is an empty string. -`start` (Optional):: - (string) The time that the {dfeed} should begin. This value is inclusive. - The default value is an empty string. +`start`:: + (Optional, string) The time that the {dfeed} should begin. This value is + inclusive. The default value is an empty string. -`timeout` (Optional):: - (time) Controls the amount of time to wait until a {dfeed} starts. +`timeout`:: + (Optional, time) Controls the amount of time to wait until a {dfeed} starts. The default value is 20 seconds. 
[[ml-start-datafeed-example]] diff --git a/docs/reference/ml/apis/stop-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/stop-datafeed.asciidoc similarity index 85% rename from docs/reference/ml/apis/stop-datafeed.asciidoc rename to docs/reference/ml/anomaly-detection/apis/stop-datafeed.asciidoc index bdac8d51fab04..e0732b1428ff5 100644 --- a/docs/reference/ml/apis/stop-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/stop-datafeed.asciidoc @@ -39,18 +39,18 @@ comma-separated list of {dfeeds} or a wildcard expression. You can close all [[ml-stop-datafeed-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the {dfeed}. It can be a {dfeed} identifier or a - wildcard expression. +``:: + (Required, string) Identifier for the {dfeed}. It can be a {dfeed} identifier + or a wildcard expression. [[ml-stop-datafeed-request-body]] ==== {api-request-body-title} -`force` (Optional):: - (boolean) If true, the {dfeed} is stopped forcefully. +`force`:: + (Optional, boolean) If true, the {dfeed} is stopped forcefully. -`timeout` (Optional):: - (time) Controls the amount of time to wait until a {dfeed} stops. +`timeout`:: + (Optional, time) Controls the amount of time to wait until a {dfeed} stops. The default value is 20 seconds. [[ml-stop-datafeed-example]] diff --git a/docs/reference/ml/apis/update-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc similarity index 53% rename from docs/reference/ml/apis/update-datafeed.asciidoc rename to docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc index b57088673d845..f6b835cda2814 100644 --- a/docs/reference/ml/apis/update-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc @@ -35,62 +35,62 @@ using those same roles. [[ml-update-datafeed-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the {dfeed}. +``:: + (Required, string) Identifier for the {dfeed}. [[ml-update-datafeed-request-body]] ==== {api-request-body-title} The following properties can be updated after the {dfeed} is created: -`aggregations` (Optional):: - (object) If set, the {dfeed} performs aggregation searches. - For more information, see <>. +`aggregations`:: + (Optional, object) If set, the {dfeed} performs aggregation searches. For more + information, see <>. -`chunking_config` (Optional):: - (object) Specifies how data searches are split into time chunks. - See <>. +`chunking_config`:: + (Optional, object) Specifies how data searches are split into time chunks. See + <>. -`delayed_data_check_config` (Optional):: - (object) Specifies whether the data feed checks for missing data and +`delayed_data_check_config`:: + (Optional, object) Specifies whether the data feed checks for missing data and the size of the window. See <>. -`frequency` (Optional):: - (time units) The interval at which scheduled queries are made while the - {dfeed} runs in real time. The default value is either the bucket span for short - bucket spans, or, for longer bucket spans, a sensible fraction of the bucket - span. For example: `150s`. - -`indices` (Optional):: - (array) An array of index names. Wildcards are supported. For example: - `["it_ops_metrics", "server*"]`. - -`job_id` (Optional):: - (string) A numerical character string that uniquely identifies the job. - -`query` (Optional):: - (object) The {es} query domain-specific language (DSL). This value - corresponds to the query object in an {es} search POST body. 
All the - options that are supported by {es} can be used, as this object is - passed verbatim to {es}. By default, this property has the following - value: `{"match_all": {"boost": 1}}`. -`query_delay` (Optional):: - (time units) The number of seconds behind real-time that data is queried. For - example, if data from 10:04 a.m. might not be searchable in {es} until - 10:06 a.m., set this property to 120 seconds. The default value is `60s`. -`script_fields` (Optional):: - (object) Specifies scripts that evaluate custom expressions and returns - script fields to the {dfeed}. - The <> in a job can contain - functions that use these script fields. - For more information, - see {ref}/search-request-script-fields.html[Script Fields]. -`scroll_size` (Optional):: - (unsigned integer) The `size` parameter that is used in {es} searches. - The default value is `1000`. +`frequency`:: + (Optional, time units) The interval at which scheduled queries are made while + the {dfeed} runs in real time. The default value is either the bucket span for + short bucket spans, or, for longer bucket spans, a sensible fraction of the + bucket span. For example: `150s`. + +`indices`:: + (Optional, array) An array of index names. Wildcards are supported. For + example: `["it_ops_metrics", "server*"]`. + +`job_id`:: + (Optional, string) A numerical character string that uniquely identifies the + job. + +`query`:: + (Optional, object) The {es} query domain-specific language (DSL). This value + corresponds to the query object in an {es} search POST body. All the options + that are supported by {es} can be used, as this object is passed verbatim to + {es}. By default, this property has the following value: + `{"match_all": {"boost": 1}}`. + +`query_delay`:: + (Optional, time units) The number of seconds behind real-time that data is + queried. For example, if data from 10:04 a.m. might not be searchable in {es} + until 10:06 a.m., set this property to 120 seconds. The default value is `60s`. + +`script_fields`:: + (Optional, object) Specifies scripts that evaluate custom expressions and + return script fields to the {dfeed}. The + <> in a job can contain + functions that use these script fields. For more information, see + <>. + +`scroll_size`:: + (Optional, unsigned integer) The `size` parameter that is used in {es} + searches. The default value is `1000`. For more information about these properties, see <>. diff --git a/docs/reference/ml/apis/update-filter.asciidoc b/docs/reference/ml/anomaly-detection/apis/update-filter.asciidoc similarity index 80% rename from docs/reference/ml/apis/update-filter.asciidoc rename to docs/reference/ml/anomaly-detection/apis/update-filter.asciidoc index df8f3056d12cc..51897ce061a23 100644 --- a/docs/reference/ml/apis/update-filter.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/update-filter.asciidoc @@ -23,20 +23,20 @@ Updates the description of a filter, adds items, or removes items. [[ml-update-filter-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the filter. +``:: + (Required, string) Identifier for the filter. [[ml-update-filter-request-body]] ==== {api-request-body-title} -`description` (Optional):: - (string) A description for the filter. See <>. - -`add_items` (Optional):: - (array of strings) The items to add to the filter. +`add_items`:: + (Optional, array of strings) The items to add to the filter. + +`description`:: + (Optional, string) A description for the filter. 
-`remove_items` (Optional):: - (array of strings) The items to remove from the filter. +`remove_items`:: + (Optional, array of strings) The items to remove from the filter. [[ml-update-filter-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/update-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/update-job.asciidoc similarity index 98% rename from docs/reference/ml/apis/update-job.asciidoc rename to docs/reference/ml/anomaly-detection/apis/update-job.asciidoc index e78bda613d801..9dfb833ec9b81 100644 --- a/docs/reference/ml/apis/update-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/update-job.asciidoc @@ -24,8 +24,8 @@ Updates certain properties of a job. [[ml-update-job-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the job. +``:: + (Required, string) Identifier for the job. [[ml-update-job-request-body]] ==== {api-request-body-title} diff --git a/docs/reference/ml/apis/update-snapshot.asciidoc b/docs/reference/ml/anomaly-detection/apis/update-snapshot.asciidoc similarity index 80% rename from docs/reference/ml/apis/update-snapshot.asciidoc rename to docs/reference/ml/anomaly-detection/apis/update-snapshot.asciidoc index 1fe2ed5384bc0..36e3e5e82f693 100644 --- a/docs/reference/ml/apis/update-snapshot.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/update-snapshot.asciidoc @@ -24,24 +24,24 @@ Updates certain properties of a snapshot. [[ml-update-snapshot-path-parms]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the job. +``:: + (Required, string) Identifier for the job. -`` (Required):: - (string) Identifier for the model snapshot. +``:: + (Required, string) Identifier for the model snapshot. [[ml-update-snapshot-request-body]] ==== {api-request-body-title} The following properties can be updated after the model snapshot is created: -`description` (Optional):: - (string) A description of the model snapshot. For example, +`description`:: + (Optional, string) A description of the model snapshot. For example, "Before black friday". -`retain` (Optional):: - (boolean) If true, this snapshot will not be deleted during automatic cleanup - of snapshots older than `model_snapshot_retention_days`. +`retain`:: + (Optional, boolean) If true, this snapshot will not be deleted during + automatic cleanup of snapshots older than `model_snapshot_retention_days`. Note that this snapshot will still be deleted when the job is deleted. The default value is false. 
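A minimal sketch combining both updatable snapshot properties documented above; the job name and snapshot identifier are placeholders, and the description string echoes the example given in the parameter list:

[source,js]
--------------------------------------------------
POST _ml/anomaly_detectors/total-requests/model_snapshots/1491852978/_update
{
  "description": "Before black friday",
  "retain": true
}
--------------------------------------------------
// CONSOLE
// TEST[skip:illustrative sketch, job name and snapshot ID are hypothetical]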
diff --git a/docs/reference/ml/apis/validate-detector.asciidoc b/docs/reference/ml/anomaly-detection/apis/validate-detector.asciidoc similarity index 100% rename from docs/reference/ml/apis/validate-detector.asciidoc rename to docs/reference/ml/anomaly-detection/apis/validate-detector.asciidoc diff --git a/docs/reference/ml/apis/validate-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/validate-job.asciidoc similarity index 100% rename from docs/reference/ml/apis/validate-job.asciidoc rename to docs/reference/ml/anomaly-detection/apis/validate-job.asciidoc diff --git a/docs/reference/ml/categories.asciidoc b/docs/reference/ml/anomaly-detection/categories.asciidoc similarity index 100% rename from docs/reference/ml/categories.asciidoc rename to docs/reference/ml/anomaly-detection/categories.asciidoc diff --git a/docs/reference/ml/configuring.asciidoc b/docs/reference/ml/anomaly-detection/configuring.asciidoc similarity index 65% rename from docs/reference/ml/configuring.asciidoc rename to docs/reference/ml/anomaly-detection/configuring.asciidoc index a27818e0cf1f5..a1a2f477d8134 100644 --- a/docs/reference/ml/configuring.asciidoc +++ b/docs/reference/ml/anomaly-detection/configuring.asciidoc @@ -21,8 +21,8 @@ send your data to that job. The results of {ml} analysis are stored in {es} and you can use {kib} to help you visualize and explore the results. -For a tutorial that walks you through these configuration steps, -see <>. +//For a tutorial that walks you through these configuration steps, +//see <>. Though it is quite simple to analyze your data and provide quick {ml} results, gaining deep insights might require some additional planning and configuration. @@ -37,23 +37,16 @@ The scenarios in this section describe some best practices for generating useful * <> * <> -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/customurl.asciidoc include::customurl.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/aggregations.asciidoc include::aggregations.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/detector-custom-rules.asciidoc include::detector-custom-rules.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/categories.asciidoc include::categories.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/populations.asciidoc include::populations.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/transforms.asciidoc include::transforms.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/delayed-data-detection.asciidoc include::delayed-data-detection.asciidoc[] \ No newline at end of file diff --git a/docs/reference/ml/customurl.asciidoc b/docs/reference/ml/anomaly-detection/customurl.asciidoc similarity index 100% rename from docs/reference/ml/customurl.asciidoc rename to docs/reference/ml/anomaly-detection/customurl.asciidoc diff --git a/docs/reference/ml/delayed-data-detection.asciidoc b/docs/reference/ml/anomaly-detection/delayed-data-detection.asciidoc similarity index 100% rename from docs/reference/ml/delayed-data-detection.asciidoc rename to docs/reference/ml/anomaly-detection/delayed-data-detection.asciidoc diff --git a/docs/reference/ml/detector-custom-rules.asciidoc b/docs/reference/ml/anomaly-detection/detector-custom-rules.asciidoc similarity index 100% rename from 
docs/reference/ml/detector-custom-rules.asciidoc rename to docs/reference/ml/anomaly-detection/detector-custom-rules.asciidoc diff --git a/docs/reference/ml/functions.asciidoc b/docs/reference/ml/anomaly-detection/functions.asciidoc similarity index 74% rename from docs/reference/ml/functions.asciidoc rename to docs/reference/ml/anomaly-detection/functions.asciidoc index 48e56bb4627ee..54a648635876d 100644 --- a/docs/reference/ml/functions.asciidoc +++ b/docs/reference/ml/anomaly-detection/functions.asciidoc @@ -11,7 +11,8 @@ you specify the functions in {ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects]. If you are creating your job in {kib}, you specify the functions differently depending on whether you are creating single metric, multi-metric, or advanced -jobs. For a demonstration of creating jobs in {kib}, see <>. +jobs. +//For a demonstration of creating jobs in {kib}, see <>. Most functions detect anomalies in both low and high values. In statistical terminology, they apply a two-sided test. Some functions offer low and high @@ -43,23 +44,16 @@ These functions effectively ignore empty buckets. * <> * <> -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/functions/count.asciidoc include::functions/count.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/functions/geo.asciidoc include::functions/geo.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/functions/info.asciidoc include::functions/info.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/functions/metric.asciidoc include::functions/metric.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/functions/rare.asciidoc include::functions/rare.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/functions/sum.asciidoc include::functions/sum.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/functions/time.asciidoc include::functions/time.asciidoc[] diff --git a/docs/reference/ml/functions/count.asciidoc b/docs/reference/ml/anomaly-detection/functions/count.asciidoc similarity index 100% rename from docs/reference/ml/functions/count.asciidoc rename to docs/reference/ml/anomaly-detection/functions/count.asciidoc diff --git a/docs/reference/ml/functions/geo.asciidoc b/docs/reference/ml/anomaly-detection/functions/geo.asciidoc similarity index 100% rename from docs/reference/ml/functions/geo.asciidoc rename to docs/reference/ml/anomaly-detection/functions/geo.asciidoc diff --git a/docs/reference/ml/functions/info.asciidoc b/docs/reference/ml/anomaly-detection/functions/info.asciidoc similarity index 100% rename from docs/reference/ml/functions/info.asciidoc rename to docs/reference/ml/anomaly-detection/functions/info.asciidoc diff --git a/docs/reference/ml/functions/metric.asciidoc b/docs/reference/ml/anomaly-detection/functions/metric.asciidoc similarity index 100% rename from docs/reference/ml/functions/metric.asciidoc rename to docs/reference/ml/anomaly-detection/functions/metric.asciidoc diff --git a/docs/reference/ml/functions/rare.asciidoc b/docs/reference/ml/anomaly-detection/functions/rare.asciidoc similarity index 100% rename from docs/reference/ml/functions/rare.asciidoc rename to docs/reference/ml/anomaly-detection/functions/rare.asciidoc diff --git a/docs/reference/ml/functions/sum.asciidoc 
b/docs/reference/ml/anomaly-detection/functions/sum.asciidoc similarity index 100% rename from docs/reference/ml/functions/sum.asciidoc rename to docs/reference/ml/anomaly-detection/functions/sum.asciidoc diff --git a/docs/reference/ml/functions/time.asciidoc b/docs/reference/ml/anomaly-detection/functions/time.asciidoc similarity index 100% rename from docs/reference/ml/functions/time.asciidoc rename to docs/reference/ml/anomaly-detection/functions/time.asciidoc diff --git a/docs/reference/ml/populations.asciidoc b/docs/reference/ml/anomaly-detection/populations.asciidoc similarity index 100% rename from docs/reference/ml/populations.asciidoc rename to docs/reference/ml/anomaly-detection/populations.asciidoc diff --git a/docs/reference/ml/stopping-ml.asciidoc b/docs/reference/ml/anomaly-detection/stopping-ml.asciidoc similarity index 97% rename from docs/reference/ml/stopping-ml.asciidoc rename to docs/reference/ml/anomaly-detection/stopping-ml.asciidoc index 2b50294e3bcb4..ddb3919dd5281 100644 --- a/docs/reference/ml/stopping-ml.asciidoc +++ b/docs/reference/ml/anomaly-detection/stopping-ml.asciidoc @@ -35,7 +35,7 @@ For more information, see <>. A {dfeed} can be started and stopped multiple times throughout its lifecycle. -For examples of stopping {dfeeds} in {kib}, see <>. +//For examples of stopping {dfeeds} in {kib}, see <>. [float] [[stopping-all-ml-datafeeds]] diff --git a/docs/reference/ml/transforms.asciidoc b/docs/reference/ml/anomaly-detection/transforms.asciidoc similarity index 98% rename from docs/reference/ml/transforms.asciidoc rename to docs/reference/ml/anomaly-detection/transforms.asciidoc index e49b1ff32d4f2..9c11206f8bf2f 100644 --- a/docs/reference/ml/transforms.asciidoc +++ b/docs/reference/ml/anomaly-detection/transforms.asciidoc @@ -151,7 +151,7 @@ function. The `datafeed-test1` {dfeed} defines the script field. It contains a script that adds two fields in the document to produce a "total" error count. The syntax for the `script_fields` property is identical to that used by {es}. -For more information, see {ref}/search-request-script-fields.html[Script Fields]. +For more information, see {ref}/search-request-body.html#request-body-search-script-fields[Script Fields]. You can preview the contents of the {dfeed} by using the following API: @@ -176,8 +176,10 @@ the `error_count` and `aborted_count` values: ---------------------------------- NOTE: This example demonstrates how to use script fields, but it contains -insufficient data to generate meaningful results. For a full demonstration of -how to create jobs with sample data, see <>. +insufficient data to generate meaningful results. + +//For a full demonstration of +//how to create jobs with sample data, see <>. You can alternatively use {kib} to create an advanced job that uses script fields. To add the `script_fields` property to your {dfeed}, you must use the diff --git a/docs/reference/ml/apis/calendarresource.asciidoc b/docs/reference/ml/apis/calendarresource.asciidoc deleted file mode 100644 index 4b3353598ba81..0000000000000 --- a/docs/reference/ml/apis/calendarresource.asciidoc +++ /dev/null @@ -1,15 +0,0 @@ -[role="xpack"] -[testenv="platinum"] -[[ml-calendar-resource]] -=== Calendar resources - -A calendar resource has the following properties: - -`calendar_id`:: - (string) A numerical character string that uniquely identifies the calendar. - -`job_ids`:: - (array) An array of job identifiers. For example: `["total-requests"]`. 
- -For more information, see -{xpack-ref}/ml-calendars.html[Calendars and Scheduled Events]. diff --git a/docs/reference/ml/apis/put-datafeed.asciidoc b/docs/reference/ml/apis/put-datafeed.asciidoc deleted file mode 100644 index 6c4578abb1671..0000000000000 --- a/docs/reference/ml/apis/put-datafeed.asciidoc +++ /dev/null @@ -1,143 +0,0 @@ -[role="xpack"] -[testenv="platinum"] -[[ml-put-datafeed]] -=== Create {dfeeds} API - -[subs="attributes"] -++++ -Create {dfeeds} -++++ - -Instantiates a {dfeed}. - -[[ml-put-datafeed-request]] -==== {api-request-title} - -`PUT _ml/datafeeds/` - -[[ml-put-datafeed-prereqs]] -==== {api-prereq-title} - -* If {es} {security-features} are enabled, you must have `manage_ml` or `manage` -cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. - -[[ml-put-datafeed-desc]] -==== {api-description-title} - -You must create a job before you create a {dfeed}. You can associate only one -{dfeed} to each job. - -[IMPORTANT] -==== -* You must use {kib} or this API to create a {dfeed}. Do not put a -{dfeed} directly to the `.ml-config` index using the {es} index API. If {es} -{security-features} are enabled, do not give users `write` privileges on the -`.ml-config` index. -* When {es} {security-features} are enabled, your {dfeed} remembers which roles -the user who created it had at the time of creation and runs the query using -those same roles. -==== - -[[ml-put-datafeed-path-parms]] -==== {api-path-parms-title} - -`` (Required):: - (string) A numerical character string that uniquely identifies the {dfeed}. - This identifier can contain lowercase alphanumeric characters (a-z and 0-9), - hyphens, and underscores. It must start and end with alphanumeric characters. - -[[ml-put-datafeed-request-body]] -==== {api-request-body-title} - -`aggregations` (Optional):: - (object) If set, the {dfeed} performs aggregation searches. - For more information, see <>. - -`chunking_config` (Optional):: - (object) Specifies how data searches are split into time chunks. - See <>. - -`delayed_data_check_config` (Optional):: - (object) Specifies whether the data feed checks for missing data and - the size of the window. See - <>. - -`frequency` (Optional):: - (time units) The interval at which scheduled queries are made while the {dfeed} - runs in real time. The default value is either the bucket span for short - bucket spans, or, for longer bucket spans, a sensible fraction of the bucket - span. For example: `150s`. - -`indices` (Required):: - (array) An array of index names. Wildcards are supported. For example: - `["it_ops_metrics", "server*"]`. - -`job_id` (Required):: - (string) A numerical character string that uniquely identifies the job. - -`query` (Optional):: - (object) The {es} query domain-specific language (DSL). This value - corresponds to the query object in an {es} search POST body. All the - options that are supported by {Es} can be used, as this object is - passed verbatim to {es}. By default, this property has the following - value: `{"match_all": {"boost": 1}}`. - -`query_delay` (Optional):: - (time units) The number of seconds behind real time that data is queried. For - example, if data from 10:04 a.m. might not be searchable in {es} until - 10:06 a.m., set this property to 120 seconds. The default value is `60s`. - -`script_fields` (Optional):: - (object) Specifies scripts that evaluate custom expressions and returns - script fields to the {dfeed}. - The <> in a job can contain - functions that use these script fields. 
For more information, - see {ref}/search-request-script-fields.html[Script Fields]. - -`scroll_size` (Optional):: - (unsigned integer) The `size` parameter that is used in {es} searches. - The default value is `1000`. - -For more information about these properties, -see <>. - -[[ml-put-datafeed-example]] -==== {api-examples-title} - -The following example creates the `datafeed-total-requests` {dfeed}: - -[source,js] --------------------------------------------------- -PUT _ml/datafeeds/datafeed-total-requests -{ - "job_id": "total-requests", - "indices": ["server-metrics"] -} --------------------------------------------------- -// CONSOLE -// TEST[skip:setup:server_metrics_job] - -When the {dfeed} is created, you receive the following results: -[source,js] ----- -{ - "datafeed_id": "datafeed-total-requests", - "job_id": "total-requests", - "query_delay": "83474ms", - "indices": [ - "server-metrics" - ], - "query": { - "match_all": { - "boost": 1.0 - } - }, - "scroll_size": 1000, - "chunking_config": { - "mode": "auto" - } -} ----- -// TESTRESPONSE[s/"query_delay": "83474ms"/"query_delay": $body.query_delay/] -// TESTRESPONSE[s/"query.boost": "1.0"/"query.boost": $body.query.boost/] diff --git a/docs/reference/ml/apis/delete-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/delete-dfanalytics.asciidoc similarity index 91% rename from docs/reference/ml/apis/delete-dfanalytics.asciidoc rename to docs/reference/ml/df-analytics/apis/delete-dfanalytics.asciidoc index 9904cf1fa49dc..0775bc889beee 100644 --- a/docs/reference/ml/apis/delete-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/delete-dfanalytics.asciidoc @@ -7,10 +7,10 @@ Delete {dfanalytics-jobs} ++++ -experimental[] - Deletes an existing {dfanalytics-job}. +experimental[] + [[ml-delete-dfanalytics-request]] ==== {api-request-title} @@ -26,8 +26,8 @@ information, see {stack-ov}/security-privileges.html[Security privileges] and [[ml-delete-dfanalytics-path-params]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the {dfanalytics-job} you want to delete. +``:: + (Required, string) Identifier for the {dfanalytics-job} you want to delete. [[ml-delete-dfanalytics-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/df-analytics/apis/dfanalyticsresources.asciidoc b/docs/reference/ml/df-analytics/apis/dfanalyticsresources.asciidoc new file mode 100644 index 0000000000000..86f3e15ed06f8 --- /dev/null +++ b/docs/reference/ml/df-analytics/apis/dfanalyticsresources.asciidoc @@ -0,0 +1,108 @@ +[role="xpack"] +[testenv="platinum"] +[[ml-dfanalytics-resources]] +=== {dfanalytics-cap} job resources + +{dfanalytics-cap} resources relate to APIs such as <> and +<>. + +[discrete] +[[ml-dfanalytics-properties]] +==== {api-definitions-title} + +`analysis`:: + (object) The type of analysis that is performed on the `source`. For example: + `outlier_detection`. For more information, see <>. + +`analyzed_fields`:: + (object) You can specify both `includes` and/or `excludes` patterns. If + `analyzed_fields` is not set, only the relevant fields will be included. For + example all the numeric fields for {oldetection}. + +`dest`:: + (object) The destination configuration of the analysis. For more information, + see <>. + +`id`:: + (string) The unique identifier for the {dfanalytics-job}. This identifier can + contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and + underscores. It must start and end with alphanumeric characters. 
This property + is informational; you cannot change the identifier for existing jobs. + +`model_memory_limit`:: + (string) The approximate maximum amount of memory resources that are + permitted for analytical processing. The default value for {dfanalytics-jobs} + is `1gb`. If your `elasticsearch.yml` file contains an + `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to + create {dfanalytics-jobs} that have `model_memory_limit` values greater than + that setting. For more information, see <>. + +`source`:: + (object) The source configuration, consisting of `index` and optionally a + `query`. For more information, see <>. + +[[dfanalytics-types]] +==== Analysis objects + +{dfanalytics-cap} resources contain `analysis` objects. For example, when you +create a {dfanalytics-job}, you must define the type of analysis it performs. + +[discrete] +[[oldetection-resources]] +===== {oldetection-cap} configuration objects + +An {oldetection} configuration object has the following properties: + +[discrete] +[[oldetection-properties]] +==== {api-definitions-title} + +`n_neighbors`:: + (integer) Defines how many nearest neighbors each method of + {oldetection} uses to calculate its {olscore}. When the value is + not set, the system will dynamically detect an appropriate value. + +`method`:: + (string) Sets the method that {oldetection} uses. If the method is not set, + {oldetection} uses an ensemble of different methods and normalises and + combines their individual {olscores} to obtain the overall {olscore}. + Available methods are `lof`, `ldof`, `distance_kth_nn`, and `distance_knn`. + +`feature_influence_threshold`:: + (double) The minimum {olscore} that a document needs to have for its + {fiscore} to be calculated. + Value range: 0-1 (`0.1` by default). + +[[dfanalytics-dest-resources]] +==== Dest configuration objects + +{dfanalytics-cap} resources contain `dest` objects. For example, when you +create a {dfanalytics-job}, you must define its destination. + +[discrete] +[[dfanalytics-dest-properties]] +==== {api-definitions-title} + +`index`:: + (string) The name of the index in which to store the results of the + {dfanalytics-job}. + +`results_field`:: + (string) The name of the field in which to store the results of the analysis. + The default value is `ml`. + +[[dfanalytics-source-resources]] +==== Source configuration objects + +The `source` configuration object has the following properties: + +`index`:: + (array) An array of index names on which to perform the analysis. It can be a + single index or index pattern as well as an array of indices or patterns. + +`query`:: + (object) The {es} query domain-specific language (DSL). This value + corresponds to the query object in an {es} search POST body. All the + options that are supported by {es} can be used, as this object is + passed verbatim to {es}. By default, this property has the following + value: `{"match_all": {}}`. 
\ No newline at end of file diff --git a/docs/reference/ml/apis/evaluate-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/evaluate-dfanalytics.asciidoc similarity index 59% rename from docs/reference/ml/apis/evaluate-dfanalytics.asciidoc rename to docs/reference/ml/df-analytics/apis/evaluate-dfanalytics.asciidoc index 9c779f939e201..10c6e1c0bcadd 100644 --- a/docs/reference/ml/apis/evaluate-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/evaluate-dfanalytics.asciidoc @@ -8,15 +8,9 @@ Evaluate {dfanalytics} ++++ -experimental[] +Evaluates the {dfanalytics} for an annotated index. -Evaluates the executed analysis on an index that is already annotated with a -field that contains the results of the analytics (the `ground truth`) for each -{dataframe} row. Evaluation is typically done via calculating a set of metrics -that capture various aspects of the quality of the results over the data for -which we have the `ground truth`. For different types of analyses different -metrics are suitable. This API packages together commonly used metrics for -various analyses. +experimental[] [[ml-evaluate-dfanalytics-request]] ==== {api-request-title} @@ -30,16 +24,46 @@ various analyses. information, see {stack-ov}/security-privileges.html[Security privileges] and {stack-ov}/built-in-roles.html[Built-in roles]. +[[ml-evaluate-dfanalytics-desc]] +==== {api-description-title} + +This API evaluates the executed analysis on an index that is already annotated +with a field that contains the results of the analytics (the `ground truth`) +for each {dataframe} row. + +Evaluation is typically done by calculating a set of metrics that capture various aspects of the quality of the results over the data for which you have the +`ground truth`. + +For different types of analyses different metrics are suitable. This API +packages together commonly used metrics for various analyses. + [[ml-evaluate-dfanalytics-request-body]] ==== {api-request-body-title} -`index` (Required):: - (object) Defines the `index` in which the evaluation will be performed. +`index`:: + (Required, object) Defines the `index` in which the evaluation will be + performed. -`evaluation` (Required):: - (object) Defines the type of evaluation you want to perform. For example: - `binary_soft_classification`. - See Evaluate API resources. +`evaluation`:: + (Required, object) Defines the type of evaluation you want to perform. For example: + `binary_soft_classification`. See <>. + +//// +[[ml-evaluate-dfanalytics-results]] +==== {api-response-body-title} + +`binary_soft_classification`:: + (object) If you chose to do binary soft classification, the API returns the + following evaluation metrics: + +`auc_roc`::: TBD + +`confusion_matrix`::: TBD + +`precision`::: TBD + +`recall`::: TBD +//// [[ml-evaluate-dfanalytics-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/df-analytics/apis/evaluateresources.asciidoc b/docs/reference/ml/df-analytics/apis/evaluateresources.asciidoc new file mode 100644 index 0000000000000..186e54bb3780b --- /dev/null +++ b/docs/reference/ml/df-analytics/apis/evaluateresources.asciidoc @@ -0,0 +1,63 @@ +[role="xpack"] +[testenv="platinum"] +[[ml-evaluate-dfanalytics-resources]] +=== {dfanalytics-cap} evaluation resources + +Evaluation configuration objects relate to the <>. + +[discrete] +[[ml-evaluate-dfanalytics-properties]] +==== {api-definitions-title} + +`evaluation`:: + (object) Defines the type of evaluation you want to perform. 
The value of this + object can be different depending on the type of evaluation you want to + perform. For example, it can contain <>. + +[[binary-sc-resources]] +==== Binary soft classification configuration objects + +Binary soft classification evaluates the results of an analysis which outputs +the probability that each {dataframe} row belongs to a certain class. For +example, in the context of outlier detection, the analysis outputs the +probability that each row is an outlier. + +[discrete] +[[binary-sc-resources-properties]] +===== {api-definitions-title} + +`actual_field`:: + (string) The field of the `index` which contains the `ground + truth`. The data type of this field can be boolean or integer. If the data + type is integer, the value has to be either `0` (false) or `1` (true). + +`predicted_probability_field`:: + (string) The field of the `index` that defines the probability of whether the + item belongs to the class in question or not. It's the field that contains the + results of the analysis. + +`metrics`:: + (object) Specifies the metrics that are used for the evaluation. Available + metrics: + + `auc_roc`:: + (object) The AUC ROC (area under the curve of the receiver operating + characteristic) score and optionally the curve. + Default value is `{"includes_curve": false}`. + + `precision`:: + (object) Sets the different thresholds of the {olscore} at which the metric + is calculated. + Default value is `{"at": [0.25, 0.50, 0.75]}`. + + `recall`:: + (object) Sets the different thresholds of the {olscore} at which the metric + is calculated. + Default value is `{"at": [0.25, 0.50, 0.75]}`. + + `confusion_matrix`:: + (object) Sets the different thresholds of the {olscore} at which the metrics + (`tp` - true positive, `fp` - false positive, `tn` - true negative, `fn` - + false negative) are calculated. + Default value is `{"at": [0.25, 0.50, 0.75]}`. + \ No newline at end of file diff --git a/docs/reference/ml/apis/get-dfanalytics-stats.asciidoc b/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc similarity index 74% rename from docs/reference/ml/apis/get-dfanalytics-stats.asciidoc rename to docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc index 01014a11e4d9e..40a59a7e6b7c6 100644 --- a/docs/reference/ml/apis/get-dfanalytics-stats.asciidoc +++ b/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc @@ -7,10 +7,10 @@ Get {dfanalytics-jobs} stats ++++ -experimental[] - Retrieves usage information for {dfanalytics-jobs}. +experimental[] + [[ml-get-dfanalytics-stats-request]] ==== {api-request-title} @@ -34,27 +34,27 @@ information, see {stack-ov}/security-privileges.html[Security privileges] and [[ml-get-dfanalytics-stats-path-params]] ==== {api-path-parms-title} -`` (Optional):: - (string) Identifier for the {dfanalytics-job}. If you do not specify one of - these options, the API returns information for the first hundred +``:: + (Optional, string) Identifier for the {dfanalytics-job}. If you do not specify + one of these options, the API returns information for the first hundred {dfanalytics-jobs}. - -`allow_no_match` (Optional) - (boolean) If `false` and the `data_frame_analytics_id` does not match any - {dfanalytics-job} an error will be returned. The default value is `true`. [[ml-get-dfanalytics-stats-query-params]] ==== {api-query-parms-title} -`from` (Optional):: - (integer) Skips the specified number of {dfanalytics-jobs}. The default value - is `0`. 
+`allow_no_match`:: + (Optional, boolean) If `false` and the `data_frame_analytics_id` does not + match any {dfanalytics-job} an error will be returned. The default value is + `true`. + +`from`:: + (Optional, integer) Skips the specified number of {dfanalytics-jobs}. The + default value is `0`. -`size` (Optional):: - (integer) Specifies the maximum number of {dfanalytics-jobs} to obtain. The - default value is `100`. +`size`:: + (Optional, integer) Specifies the maximum number of {dfanalytics-jobs} to + obtain. The default value is `100`. -[discrete] [[ml-get-dfanalytics-stats-response-body]] ==== {api-response-body-title} diff --git a/docs/reference/ml/apis/get-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/get-dfanalytics.asciidoc similarity index 75% rename from docs/reference/ml/apis/get-dfanalytics.asciidoc rename to docs/reference/ml/df-analytics/apis/get-dfanalytics.asciidoc index edf14060cad12..12f603d94f7dd 100644 --- a/docs/reference/ml/apis/get-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/get-dfanalytics.asciidoc @@ -7,10 +7,10 @@ Get {dfanalytics-jobs} ++++ -experimental[] - Retrieves configuration information for {dfanalytics-jobs}. +experimental[] + [[ml-get-dfanalytics-request]] ==== {api-request-title} @@ -41,25 +41,37 @@ You can get information for all {dfanalytics-jobs} by using _all, by specifying [[ml-get-dfanalytics-path-params]] ==== {api-path-parms-title} -`` (Optional):: - (string) Identifier for the {dfanalytics-job}. If you do not specify one of - these options, the API returns information for the first hundred +``:: + (Optional, string) Identifier for the {dfanalytics-job}. If you do not specify + one of these options, the API returns information for the first hundred {dfanalytics-jobs}. -`allow_no_match` (Optional) - (boolean) If `false` and the `data_frame_analytics_id` does not match any - {dfanalytics-job} an error will be returned. The default value is `true`. [[ml-get-dfanalytics-query-params]] ==== {api-query-parms-title} -`from` (Optional):: - (integer) Skips the specified number of {dfanalytics-jobs}. The default value - is `0`. +`allow_no_match`:: + (Optional, boolean) If `false` and the `data_frame_analytics_id` does not + match any {dfanalytics-job} an error will be returned. The default value is + `true`. -`size` (Optional):: - (integer) Specifies the maximum number of {dfanalytics-jobs} to obtain. The +`from`:: + (Optional, integer) Skips the specified number of {dfanalytics-jobs}. The + default value is `0`. + +`size`:: + (Optional, integer) Specifies the maximum number of {dfanalytics-jobs} to + obtain. The default value is `100`. + +[[ml-get-dfanalytics-results]] +==== {api-response-body-title} + +`data_frame_analytics`:: + (array) An array of {dfanalytics-job} resources. For more information, see + <>. [[ml-get-dfanalytics-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/df-analytics/apis/index.asciidoc b/docs/reference/ml/df-analytics/apis/index.asciidoc new file mode 100644 index 0000000000000..416e11f146b70 --- /dev/null +++ b/docs/reference/ml/df-analytics/apis/index.asciidoc @@ -0,0 +1,30 @@ +[role="xpack"] +[testenv="platinum"] +[[ml-df-analytics-apis]] +== {ml-cap} {dfanalytics} APIs + +You can use the following APIs to perform {ml} {dfanalytics} activities. + +* <> +* <> +* <> +* <> +* <> +* <> +* <> + +See also <>. 
+ +//CREATE +include::put-dfanalytics.asciidoc[] +//DELETE +include::delete-dfanalytics.asciidoc[] +//EVALUATE +include::evaluate-dfanalytics.asciidoc[] +//GET +include::get-dfanalytics.asciidoc[] +include::get-dfanalytics-stats.asciidoc[] +//SET/START/STOP +include::start-dfanalytics.asciidoc[] +include::stop-dfanalytics.asciidoc[] + diff --git a/docs/reference/ml/apis/put-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc similarity index 75% rename from docs/reference/ml/apis/put-dfanalytics.asciidoc rename to docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc index 8499950c2fb17..72b3a37f743ee 100644 --- a/docs/reference/ml/apis/put-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc @@ -7,10 +7,10 @@ Create {dfanalytics-jobs} ++++ -experimental[] - Instantiates a {dfanalytics-job}. +experimental[] + [[ml-put-dfanalytics-request]] ==== {api-request-title} @@ -48,32 +48,31 @@ and mappings. [[ml-put-dfanalytics-path-params]] ==== {api-path-parms-title} -`` (Required):: - (string) A numerical character string that uniquely identifies the - {dfanalytics-job}. This identifier can contain lowercase alphanumeric characters - (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric - characters. +``:: + (Required, string) A numerical character string that uniquely identifies the + {dfanalytics-job}. This identifier can contain lowercase alphanumeric + characters (a-z and 0-9), hyphens, and underscores. It must start and end with + alphanumeric characters. [[ml-put-dfanalytics-request-body]] ==== {api-request-body-title} -`source` (Required):: - (object) The source configuration, consisting of `index` and optionally a - `query`. +`analysis`:: + (Required, object) Defines the type of {dfanalytics} you want to perform on your source + index. For example: `outlier_detection`. See <>. -`dest` (Required):: - (object) The destination configuration, consisting of `index` and optionally - `results_field` (`ml` by default). +`analyzed_fields`:: + (Optional, object) You can specify both `includes` and/or `excludes` patterns. If + `analyzed_fields` is not set, only the relevant fields will be included. For + example, all the numeric fields for {oldetection}. -`analysis` (Required):: - (object) Defines the type of {dfanalytics} you want to perform on your source - index. For example: `outlier_detection`. - See {oldetection} resources. +`dest`:: + (Required, object) The destination configuration, consisting of `index` and optionally + `results_field` (`ml` by default). See <>. -`analyzed_fields` (Optional):: - (object) You can specify both `includes` and/or `excludes` patterns. If - `analyzed_fields` is not set, only the relevant fileds will be included. For - example all the numeric fields for {oldetection}. +`source`:: + (Required, object) The source configuration, consisting of `index` and optionally a + `query`. See <>. 
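Putting the required body parameters together, a minimal create request could look like the sketch below (the `loganalytics` job ID and the `logdata`/`logdata_out` index names are illustrative placeholders):

[source,js]
----
PUT _ml/data_frame/analytics/loganalytics
{
  "source": {
    "index": "logdata"
  },
  "dest": {
    "index": "logdata_out"
  },
  "analysis": {
    "outlier_detection": {}
  }
}
----
// CONSOLE
// TEST[skip:sketch with placeholder index and job names]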
[[ml-put-dfanalytics-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/start-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/start-dfanalytics.asciidoc similarity index 74% rename from docs/reference/ml/apis/start-dfanalytics.asciidoc rename to docs/reference/ml/df-analytics/apis/start-dfanalytics.asciidoc index 4b2c774ae3b26..39ce13530285b 100644 --- a/docs/reference/ml/apis/start-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/start-dfanalytics.asciidoc @@ -8,10 +8,10 @@ Start {dfanalytics-jobs} ++++ -experimental[] - Starts a {dfanalytics-job}. +experimental[] + [[ml-start-dfanalytics-request]] ==== {api-request-title} @@ -29,14 +29,17 @@ and {stack-ov}/built-in-roles.html[Built-in roles]. [[ml-start-dfanalytics-path-params]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the {dfanalytics-job}. This identifier can contain - lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It - must start and end with alphanumeric characters. - -`timeout` (Optional):: - (time) Controls the amount of time to wait until the {dfanalytics-job} starts. - The default value is 20 seconds. +``:: + (Required, string) Identifier for the {dfanalytics-job}. This identifier can + contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and + underscores. It must start and end with alphanumeric characters. + +[[ml-start-dfanalytics-query-params]] +==== {api-query-parms-title} + +`timeout`:: + (Optional, time) Controls the amount of time to wait until the + {dfanalytics-job} starts. The default value is 20 seconds. [[ml-start-dfanalytics-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/stop-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/stop-dfanalytics.asciidoc similarity index 69% rename from docs/reference/ml/apis/stop-dfanalytics.asciidoc rename to docs/reference/ml/df-analytics/apis/stop-dfanalytics.asciidoc index 70b1d8454f8e1..7e5ee42234b73 100644 --- a/docs/reference/ml/apis/stop-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/stop-dfanalytics.asciidoc @@ -8,10 +8,10 @@ Stop {dfanalytics-jobs} ++++ -experimental[] - Stops one or more {dfanalytics-jobs}. +experimental[] + [[ml-stop-dfanalytics-request]] ==== {api-request-title} @@ -42,21 +42,26 @@ stop all {dfanalytics-job} by using _all or by specifying * as the [[ml-stop-dfanalytics-path-params]] ==== {api-path-parms-title} -`` (Required):: - (string) Identifier for the {dfanalytics-job}. This identifier can contain - lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It - must start and end with alphanumeric characters. +``:: + (Required, string) Identifier for the {dfanalytics-job}. This identifier can + contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and + underscores. It must start and end with alphanumeric characters. -`timeout` (Optional):: - Controls the amount of time to wait until the {dfanalytics-job} stops. - The default value is 20 seconds. +[[ml-stop-dfanalytics-query-params]] +==== {api-query-parms-title} -`force` (Optional):: - (boolean) If true, the {dfanalytics-job} is stopped forcefully. +`allow_no_match`:: + (Optional, boolean) If `false` and the `data_frame_analytics_id` does not + match any {dfanalytics-job} an error will be returned. The default value is + `true`. + +`force`:: + (Optional, boolean) If true, the {dfanalytics-job} is stopped forcefully. 
-`allow_no_match` (Optional) - (boolean) If `false` and the `data_frame_analytics_id` does not match any - {dfanalytics-job} an error will be returned. The default value is `true`. +`timeout`:: + (Optional, time) Controls the amount of time to wait until the + {dfanalytics-job} stops. The default value is 20 seconds. + [[ml-stop-dfanalytics-example]] ==== {api-examples-title} diff --git a/docs/reference/modules.asciidoc b/docs/reference/modules.asciidoc index e9f187a98e3c7..2db62c2075611 100644 --- a/docs/reference/modules.asciidoc +++ b/docs/reference/modules.asciidoc @@ -89,7 +89,6 @@ include::modules/network.asciidoc[] include::modules/node.asciidoc[] -:edit_url: include::modules/plugins.asciidoc[] include::modules/snapshots.asciidoc[] diff --git a/docs/reference/modules/cross-cluster-search.asciidoc b/docs/reference/modules/cross-cluster-search.asciidoc index f9a5e16174744..5f1f595ad058f 100644 --- a/docs/reference/modules/cross-cluster-search.asciidoc +++ b/docs/reference/modules/cross-cluster-search.asciidoc @@ -305,7 +305,7 @@ is located. Once each cluster has responded to such request, the search executes as if all shards were part of the same cluster. The coordinating node sends one request to each shard involved, each shard executes the query and returns its own results which are then reduced (and fetched, depending on the -<>) by the CCS coordinating node. +<>) by the CCS coordinating node. This strategy may be beneficial whenever there is very low network latency between the CCS coordinating node and the remote clusters involved, as it treats all shards the same, at the cost of sending many requests to each remote diff --git a/docs/reference/modules/snapshots.asciidoc b/docs/reference/modules/snapshots.asciidoc index ed52c0958fec0..240fbb6e70e30 100644 --- a/docs/reference/modules/snapshots.asciidoc +++ b/docs/reference/modules/snapshots.asciidoc @@ -11,12 +11,16 @@ Snapshots are taken incrementally. This means that when it creates a snapshot of an index, Elasticsearch avoids copying any data that is already stored in the repository as part of an earlier snapshot of the same index. Therefore it can be efficient to take snapshots of your cluster quite frequently. +// end::snapshot-intro[] +// tag::restore-intro[] You can restore snapshots into a running cluster via the <>. When you restore an index, you can alter the name of the restored index as well as some of its settings. There is a great deal of flexibility in how the snapshot and restore functionality can be used. +// end::restore-intro[] +// tag::backup-warning[] WARNING: You cannot back up an Elasticsearch cluster by simply taking a copy of the data directories of all of its nodes. Elasticsearch may be making changes to the contents of its data directories while it is running; copying its data @@ -26,7 +30,7 @@ corruption and/or missing files. Alternatively, it may appear to have succeeded though it silently lost some of its data. The only reliable way to back up a cluster is by using the snapshot and restore functionality. -// end::snapshot-intro[] +// end::backup-warning[] [float] === Version compatibility @@ -66,6 +70,7 @@ recommend testing the reindex from remote process with a subset of your data to understand the time requirements before proceeding. 
[float] +[[snapshots-repositories]] === Repositories You must register a snapshot repository before you can perform snapshot and @@ -325,6 +330,7 @@ POST /_snapshot/my_unverified_backup/_verify It returns a list of nodes where repository was successfully verified or an error message if verification process failed. [float] +[[snapshots-take-snapshot]] === Snapshot A repository can contain multiple snapshots of the same cluster. Snapshots are identified by unique names within the @@ -353,7 +359,7 @@ PUT /_snapshot/my_backup/snapshot_2?wait_for_completion=true "indices": "index_1,index_2", "ignore_unavailable": true, "include_global_state": false, - "_meta": { + "metadata": { "taken_by": "kimchy", "taken_because": "backup before upgrading" } @@ -370,7 +376,7 @@ By setting `include_global_state` to false it's possible to prevent the cluster the snapshot. By default, the entire snapshot will fail if one or more indices participating in the snapshot don't have all primary shards available. This behaviour can be changed by setting `partial` to `true`. -The `_meta` field can be used to attach arbitrary metadata to the snapshot. This may be a record of who took the snapshot, +The `metadata` field can be used to attach arbitrary metadata to the snapshot. This may be a record of who took the snapshot, why it was taken, or any other data that might be useful. Snapshot names can be automatically derived using <>, similarly as when creating diff --git a/docs/reference/query-dsl.asciidoc b/docs/reference/query-dsl.asciidoc index 8d9c803b6159a..74d22d6de411e 100644 --- a/docs/reference/query-dsl.asciidoc +++ b/docs/reference/query-dsl.asciidoc @@ -29,22 +29,22 @@ Query clauses behave differently depending on whether they are used in include::query-dsl/query_filter_context.asciidoc[] -include::query-dsl/match-all-query.asciidoc[] +include::query-dsl/compound-queries.asciidoc[] include::query-dsl/full-text-queries.asciidoc[] -include::query-dsl/term-level-queries.asciidoc[] - -include::query-dsl/compound-queries.asciidoc[] +include::query-dsl/geo-queries.asciidoc[] include::query-dsl/joining-queries.asciidoc[] -include::query-dsl/geo-queries.asciidoc[] +include::query-dsl/match-all-query.asciidoc[] + +include::query-dsl/span-queries.asciidoc[] include::query-dsl/special-queries.asciidoc[] -include::query-dsl/span-queries.asciidoc[] +include::query-dsl/term-level-queries.asciidoc[] include::query-dsl/minimum-should-match.asciidoc[] -include::query-dsl/multi-term-rewrite.asciidoc[] +include::query-dsl/multi-term-rewrite.asciidoc[] \ No newline at end of file diff --git a/docs/reference/query-dsl/bool-query.asciidoc b/docs/reference/query-dsl/bool-query.asciidoc index 49dc4e2364efb..1d6edfdbd0c6f 100644 --- a/docs/reference/query-dsl/bool-query.asciidoc +++ b/docs/reference/query-dsl/bool-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-bool-query]] -=== Bool Query +=== Boolean query +++++ +Boolean +++++ A query that matches documents matching boolean combinations of other queries. The bool query maps to Lucene `BooleanQuery`. It is built using @@ -134,5 +137,5 @@ GET _search If you need to know which of the clauses in the bool query matched the documents returned from the query, you can use -<> to assign a name to +<> to assign a name to each clause. 
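As a brief illustration (a sketch only; the `name.first` and `name.last` fields are placeholders), assigning `_name` to each clause makes the search response report which named clauses matched each returned document:

[source,js]
----
GET /_search
{
  "query": {
    "bool": {
      "should": [
        { "match": { "name.first": { "query": "shay", "_name": "first" } } },
        { "match": { "name.last":  { "query": "banon", "_name": "last" } } }
      ]
    }
  }
}
----
// CONSOLE
// TEST[skip:sketch with placeholder fields]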
diff --git a/docs/reference/query-dsl/boosting-query.asciidoc b/docs/reference/query-dsl/boosting-query.asciidoc index c57235e71606d..07a0d00cc6475 100644 --- a/docs/reference/query-dsl/boosting-query.asciidoc +++ b/docs/reference/query-dsl/boosting-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-boosting-query]] -=== Boosting Query +=== Boosting query +++++ +Boosting +++++ Returns documents matching a `positive` query while reducing the <> of documents that also match a diff --git a/docs/reference/query-dsl/compound-queries.asciidoc b/docs/reference/query-dsl/compound-queries.asciidoc index bee5787df1d28..d156950e35579 100644 --- a/docs/reference/query-dsl/compound-queries.asciidoc +++ b/docs/reference/query-dsl/compound-queries.asciidoc @@ -7,39 +7,34 @@ filter context. The queries in this group are: -<>:: - -A query which wraps another query, but executes it in filter context. All -matching documents are given the same ``constant'' `_score`. - <>:: - The default query for combining multiple leaf or compound query clauses, as `must`, `should`, `must_not`, or `filter` clauses. The `must` and `should` clauses have their scores combined -- the more matching clauses, the better -- while the `must_not` and `filter` clauses are executed in filter context. -<>:: +<>:: +Return documents which match a `positive` query, but reduce the score of +documents which also match a `negative` query. +<>:: +A query which wraps another query, but executes it in filter context. All +matching documents are given the same ``constant'' `_score`. + +<>:: A query which accepts multiple queries, and returns any documents which match any of the query clauses. While the `bool` query combines the scores from all matching queries, the `dis_max` query uses the score of the single best- matching query clause. <>:: - Modify the scores returned by the main query with functions to take into account factors like popularity, recency, distance, or custom algorithms implemented with scripting. -<>:: - -Return documents which match a `positive` query, but reduce the score of -documents which also match a `negative` query. - -include::constant-score-query.asciidoc[] include::bool-query.asciidoc[] -include::dis-max-query.asciidoc[] -include::function-score-query.asciidoc[] include::boosting-query.asciidoc[] +include::constant-score-query.asciidoc[] +include::dis-max-query.asciidoc[] +include::function-score-query.asciidoc[] \ No newline at end of file diff --git a/docs/reference/query-dsl/constant-score-query.asciidoc b/docs/reference/query-dsl/constant-score-query.asciidoc index bfcece8d62e2f..034c5167d60d4 100644 --- a/docs/reference/query-dsl/constant-score-query.asciidoc +++ b/docs/reference/query-dsl/constant-score-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-constant-score-query]] -=== Constant Score Query +=== Constant score query +++++ +Constant score +++++ Wraps a <> and returns every matching document with a <> equal to the `boost` diff --git a/docs/reference/query-dsl/dis-max-query.asciidoc b/docs/reference/query-dsl/dis-max-query.asciidoc index 9a0f1fb7b039c..771969678b899 100644 --- a/docs/reference/query-dsl/dis-max-query.asciidoc +++ b/docs/reference/query-dsl/dis-max-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-dis-max-query]] -=== Disjunction Max Query +=== Disjunction max query +++++ +Disjunction max +++++ Returns documents matching one or more wrapped queries, called query clauses or clauses. 
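For instance, the following sketch (placeholder `title` and `body` fields) scores each document by its single best-matching clause, while `tie_breaker` folds in a fraction of the scores from the other matching clauses:

[source,js]
----
GET /_search
{
  "query": {
    "dis_max": {
      "queries": [
        { "term": { "title": "Quick pets" } },
        { "term": { "body": "Quick pets" } }
      ],
      "tie_breaker": 0.7
    }
  }
}
----
// CONSOLE
// TEST[skip:sketch with placeholder fields]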
diff --git a/docs/reference/query-dsl/distance-feature-query.asciidoc b/docs/reference/query-dsl/distance-feature-query.asciidoc index 513449e04c627..6ad441af61004 100644 --- a/docs/reference/query-dsl/distance-feature-query.asciidoc +++ b/docs/reference/query-dsl/distance-feature-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-distance-feature-query]] -=== Distance Feature Query +=== Distance feature query +++++ +Distance feature +++++ The `distance_feature` query is a specialized query that only works on <>, <> or <> diff --git a/docs/reference/query-dsl/exists-query.asciidoc b/docs/reference/query-dsl/exists-query.asciidoc index f35e97db8ec18..db44a6f40b7d7 100644 --- a/docs/reference/query-dsl/exists-query.asciidoc +++ b/docs/reference/query-dsl/exists-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-exists-query]] -=== Exists Query +=== Exists query +++++ +Exists +++++ Returns documents that contain an indexed value for a field. diff --git a/docs/reference/query-dsl/full-text-queries.asciidoc b/docs/reference/query-dsl/full-text-queries.asciidoc index 8fc53bc7e9b8a..e649fbae6f270 100644 --- a/docs/reference/query-dsl/full-text-queries.asciidoc +++ b/docs/reference/query-dsl/full-text-queries.asciidoc @@ -7,56 +7,49 @@ the field during indexing. The queries in this group are: +<>:: +A full text query that allows fine-grained control of the ordering and +proximity of matching terms. + <>:: +The standard query for performing full text queries, including fuzzy matching +and phrase or proximity queries. - The standard query for performing full text queries, including fuzzy matching - and phrase or proximity queries. +<>:: +Creates a `bool` query that matches each term as a `term` query, except for +the last term, which is matched as a `prefix` query. <>:: - - Like the `match` query but used for matching exact phrases or word proximity matches. +Like the `match` query but used for matching exact phrases or word proximity matches. <>:: - - Like the `match_phrase` query, but does a wildcard search on the final word. - -<>:: - - Creates a `bool` query that matches each term as a `term` query, except for - the last term, which is matched as a `prefix` query - +Like the `match_phrase` query, but does a wildcard search on the final word. + <>:: - - The multi-field version of the `match` query. +The multi-field version of the `match` query. <>:: - - Supports the compact Lucene <>, - allowing you to specify AND|OR|NOT conditions and multi-field search - within a single query string. For expert users only. +Supports the compact Lucene <>, +allowing you to specify AND|OR|NOT conditions and multi-field search +within a single query string. For expert users only. <>:: +A simpler, more robust version of the `query_string` syntax suitable +for exposing directly to users. - A simpler, more robust version of the `query_string` syntax suitable - for exposing directly to users. 
- -<>:: - A full text query that allows fine-grained control of the ordering and - proximity of matching terms +include::intervals-query.asciidoc[] include::match-query.asciidoc[] +include::match-bool-prefix-query.asciidoc[] + include::match-phrase-query.asciidoc[] include::match-phrase-prefix-query.asciidoc[] -include::match-bool-prefix-query.asciidoc[] - include::multi-match-query.asciidoc[] include::query-string-query.asciidoc[] -include::simple-query-string-query.asciidoc[] - -include::intervals-query.asciidoc[] +include::simple-query-string-query.asciidoc[] \ No newline at end of file diff --git a/docs/reference/query-dsl/function-score-query.asciidoc b/docs/reference/query-dsl/function-score-query.asciidoc index 60b87baf3d758..d5f20a57dc228 100644 --- a/docs/reference/query-dsl/function-score-query.asciidoc +++ b/docs/reference/query-dsl/function-score-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-function-score-query]] -=== Function Score Query +=== Function score query +++++ +Function score +++++ The `function_score` allows you to modify the score of documents that are retrieved by a query. This can be useful if, for example, a score diff --git a/docs/reference/query-dsl/fuzzy-query.asciidoc b/docs/reference/query-dsl/fuzzy-query.asciidoc index 4be546916240f..ecf43f90a1a6a 100644 --- a/docs/reference/query-dsl/fuzzy-query.asciidoc +++ b/docs/reference/query-dsl/fuzzy-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-fuzzy-query]] -=== Fuzzy Query +=== Fuzzy query +++++ +Fuzzy +++++ The fuzzy query uses similarity based on Levenshtein edit distance. diff --git a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc index 1a088a350145f..b0523e09a3a65 100644 --- a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc +++ b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-geo-bounding-box-query]] -=== Geo Bounding Box Query +=== Geo-bounding box query +++++ +Geo-bounding box +++++ A query allowing to filter hits based on a point location using a bounding box. Assuming the following indexed document: diff --git a/docs/reference/query-dsl/geo-distance-query.asciidoc b/docs/reference/query-dsl/geo-distance-query.asciidoc index da7b0ecfd81e5..7a7f749687ee6 100644 --- a/docs/reference/query-dsl/geo-distance-query.asciidoc +++ b/docs/reference/query-dsl/geo-distance-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-geo-distance-query]] -=== Geo Distance Query +=== Geo-distance query +++++ +Geo-distance +++++ Filters documents that include only hits that exists within a specific distance from a geo point. Assuming the following mapping and indexed diff --git a/docs/reference/query-dsl/geo-polygon-query.asciidoc b/docs/reference/query-dsl/geo-polygon-query.asciidoc index c33b227824bdf..062e44cf03d0c 100644 --- a/docs/reference/query-dsl/geo-polygon-query.asciidoc +++ b/docs/reference/query-dsl/geo-polygon-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-geo-polygon-query]] -=== Geo Polygon Query +=== Geo-polygon query +++++ +Geo-polygon +++++ A query returning hits that only fall within a polygon of points. Here is an example: diff --git a/docs/reference/query-dsl/geo-queries.asciidoc b/docs/reference/query-dsl/geo-queries.asciidoc index 5220b00101e98..b3cc9112576cc 100644 --- a/docs/reference/query-dsl/geo-queries.asciidoc +++ b/docs/reference/query-dsl/geo-queries.asciidoc @@ -8,29 +8,24 @@ lines, circles, polygons, multi-polygons, etc. 
The queries in this group are: -<> query:: - - Finds documents with geo-shapes which either intersect, are contained by, or - do not intersect with the specified geo-shape. - <> query:: - - Finds documents with geo-points that fall into the specified rectangle. +Finds documents with geo-points that fall into the specified rectangle. <> query:: - - Finds documents with geo-points within the specified distance of a central - point. +Finds documents with geo-points within the specified distance of a central point. <> query:: +Finds documents with geo-points within the specified polygon. - Find documents with geo-points within the specified polygon. - +<> query:: +Finds documents with geo-shapes which either intersect, are contained by, or do not intersect with the specified +geo-shape. -include::geo-shape-query.asciidoc[] include::geo-bounding-box-query.asciidoc[] include::geo-distance-query.asciidoc[] include::geo-polygon-query.asciidoc[] + +include::geo-shape-query.asciidoc[] diff --git a/docs/reference/query-dsl/geo-shape-query.asciidoc b/docs/reference/query-dsl/geo-shape-query.asciidoc index 424968090d6ab..79395f24e3ce9 100644 --- a/docs/reference/query-dsl/geo-shape-query.asciidoc +++ b/docs/reference/query-dsl/geo-shape-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-geo-shape-query]] -=== GeoShape Query +=== Geo-shape query +++++ +Geo-shape +++++ Filter documents indexed using the `geo_shape` type. diff --git a/docs/reference/query-dsl/has-child-query.asciidoc b/docs/reference/query-dsl/has-child-query.asciidoc index b75c06c99c566..0b3941ea31976 100644 --- a/docs/reference/query-dsl/has-child-query.asciidoc +++ b/docs/reference/query-dsl/has-child-query.asciidoc @@ -1,135 +1,161 @@ [[query-dsl-has-child-query]] -=== Has Child Query - -The `has_child` filter accepts a query and the child type to run against, and -results in parent documents that have child docs matching the query. Here is -an example: +=== Has child query +++++ +Has child +++++ + +Returns parent documents whose <> child documents match a +provided query. You can create parent-child relationships between documents in +the same index using a <> field mapping. + +[WARNING] +==== +Because it performs a join, the `has_child` query is slow compared to other queries. +Its performance degrades as the number of matching child documents pointing to +unique parent documents increases. Each `has_child` query in a search can +increase query time significantly. + +If you care about query performance, do not use this query. If you need to use +the `has_child` query, use it as rarely as possible. +==== + +[[has-child-query-ex-request]] +==== Example request + +[[has-child-index-setup]] +===== Index setup +To use the `has_child` query, your index must include a <> +field mapping. For example: [source,js] --------------------------------------------------- -GET /_search +---- +PUT /my_index { - "query": { - "has_child" : { - "type" : "blog_tag", - "query" : { - "term" : { - "tag" : "something" + "mappings": { + "properties" : { + "my-join-field" : { + "type" : "join", + "relations": { + "parent": "child" } } } } } --------------------------------------------------- + +---- // CONSOLE +// TESTSETUP -
-However if you do happen to use this query then use it as little as possible. -Each `has_child` query that gets added to a search request can increase query -time significantly. - -[float] -==== Scoring capabilities - -The `has_child` also has scoring support. The -supported score modes are `min`, `max`, `sum`, `avg` or `none`. The default is -`none` and yields the same behaviour as in previous versions. If the -score mode is set to another value than `none`, the scores of all the -matching child documents are aggregated into the associated parent -documents. The score type can be specified with the `score_mode` field -inside the `has_child` query: +[[has-child-query-ex-query]] +===== Example query [source,js] --------------------------------------------------- +---- GET /_search { "query": { "has_child" : { - "type" : "blog_tag", - "score_mode" : "min", + "type" : "child", "query" : { - "term" : { - "tag" : "something" - } - } + "match_all" : {} + }, + "max_children": 10, + "min_children": 2, + "score_mode" : "min" } } } --------------------------------------------------- +---- // CONSOLE -[float] -[[min-max-children]] -==== Min/Max Children +[[has-child-top-level-params]] +==== Top-level parameters for `has_child` -The `has_child` query allows you to specify that a minimum and/or maximum -number of children are required to match for the parent doc to be considered -a match: +`type`:: +(string) Required. Name of the child relationship mapped for the +<> field. -[source,js] --------------------------------------------------- -GET /_search -{ - "query": { - "has_child" : { - "type" : "blog_tag", - "score_mode" : "min", - "min_children": 2, <1> - "max_children": 10, <1> - "query" : { - "term" : { - "tag" : "something" - } - } - } - } -} --------------------------------------------------- -// CONSOLE -<1> Both `min_children` and `max_children` are optional. +`query`:: +(query object) Required. Query you wish to run on child documents of the `type` +field. If a child document matches the search, the query returns the parent +document. + +`ignore_unmapped`:: ++ +-- +(boolean) Optional. Indicates whether to ignore an unmapped `type` and not return +any documents instead of an error. Defaults to `false`. + +If `false`, {es} returns an error if the `type` is unmapped. -The `min_children` and `max_children` parameters can be combined with -the `score_mode` parameter. +You can use this parameter to query multiple indices that may not contain the +`type`. +-- -[float] -==== Ignore Unmapped +`max_children`:: +(integer) Optional. Maximum number of child documents that match the `query` +allowed for a returned parent document. If the parent document exceeds this +limit, it is excluded from the search results. -When set to `true` the `ignore_unmapped` option will ignore an unmapped `type` -and will not match any documents for this query. This can be useful when -querying multiple indexes which might have different mappings. When set to -`false` (the default value) the query will throw an exception if the `type` -is not mapped. +`min_children`:: +(integer) Optional. Minimum number of child documents that match the `query` +required to match the query for a returned parent document. If the parent +document does not meet this limit, it is excluded from the search results. -[float] -==== Sorting +`score_mode`:: ++ +-- +(string) Optional. Indicates how scores for matching child documents affect the +root parent document's <>. 
Valid values +are: -Parent documents can't be sorted by fields in matching child documents via the -regular sort options. If you need to sort parent document by field in the child -documents then you should use the `function_score` query and then just sort -by `_score`. +`none` (Default):: +Do not use the relevance scores of matching child documents. The query assigns +parent documents a score of `0`. -Sorting blogs by child documents' `click_count` field: +`avg`:: +Use the mean relevance score of all matching child documents. + +`max`:: +Use the highest relevance score of all matching child documents. + +`min`:: +Use the lowest relevance score of all matching child documents. + +`sum`:: +Add together the relevance scores of all matching child documents. +-- + +[[has-child-query-notes]] +==== Notes + +[[has-child-query-performance]] +===== Sorting +You cannot sort the results of a `has_child` query using standard +<>. + +If you need to sort returned documents by a field in their child documents, use +a `function_score` query and sort by `_score`. For example, the following query +sorts returned documents by the `click_count` field of their child documents. [source,js] --------------------------------------------------- +---- GET /_search { "query": { "has_child" : { - "type" : "blog_tag", - "score_mode" : "max", + "type" : "child", "query" : { "function_score" : { "script_score": { "script": "_score * doc['click_count'].value" } } - } + }, + "score_mode" : "max" } } } --------------------------------------------------- +---- // CONSOLE diff --git a/docs/reference/query-dsl/has-parent-query.asciidoc b/docs/reference/query-dsl/has-parent-query.asciidoc index 4065a9d99fe2e..39cb22fb75094 100644 --- a/docs/reference/query-dsl/has-parent-query.asciidoc +++ b/docs/reference/query-dsl/has-parent-query.asciidoc @@ -1,93 +1,132 @@ [[query-dsl-has-parent-query]] -=== Has Parent Query +=== Has parent query +++++ +Has parent +++++ -The `has_parent` query accepts a query and a parent type. The query is -executed in the parent document space, which is specified by the parent -type. This query returns child documents which associated parents have -matched. For the rest `has_parent` query has the same options and works -in the same manner as the `has_child` query. +Returns child documents whose <> parent document matches a +provided query. You can create parent-child relationships between documents in +the same index using a <> field mapping. + +[WARNING] +==== +Because it performs a join, the `has_parent` query is slow compared to other queries. +Its performance degrades as the number of matching parent documents increases. +Each `has_parent` query in a search can increase query time significantly. +==== + +[[has-parent-query-ex-request]] +==== Example request + +[[has-parent-index-setup]] +===== Index setup +To use the `has_parent` query, your index must include a <> +field mapping. For example: [source,js] --------------------------------------------------- -GET /_search +---- +PUT /my-index { - "query": { - "has_parent" : { - "parent_type" : "blog", - "query" : { - "term" : { - "tag" : "something" + "mappings": { + "properties" : { + "my-join-field" : { + "type" : "join", + "relations": { + "parent": "child" } + }, + "tag" : { + "type" : "keyword" } } } } --------------------------------------------------- -// CONSOLE -Note that the `has_parent` is a slow query compared to other queries in the -query dsl due to the fact that it performs a join. 
The performance degrades -as the number of matching parent documents increases. If you care about query -performance you should not use this query. However if you do happen to use -this query then use it as less as possible. Each `has_parent` query that gets -added to a search request can increase query time significantly. - -[float] -==== Scoring capabilities +---- +// CONSOLE +// TESTSETUP -The `has_parent` also has scoring support. The default is `false` which -ignores the score from the parent document. The score is in this -case equal to the boost on the `has_parent` query (Defaults to 1). If -the score is set to `true`, then the score of the matching parent -document is aggregated into the child documents belonging to the -matching parent document. The score mode can be specified with the -`score` field inside the `has_parent` query: +[[has-parent-query-ex-query]] +===== Example query [source,js] --------------------------------------------------- -GET /_search +---- +GET /my-index/_search { "query": { "has_parent" : { - "parent_type" : "blog", - "score" : true, + "parent_type" : "parent", "query" : { "term" : { - "tag" : "something" + "tag" : { + "value" : "Elasticsearch" + } } } } } } --------------------------------------------------- +---- // CONSOLE -[float] -==== Ignore Unmapped +[[has-parent-top-level-params]] +==== Top-level parameters for `has_parent` + +`parent_type`:: +(Required, string) Name of the parent relationship mapped for the +<> field. + +`query`:: +(Required, query object) Query you wish to run on parent documents of the +`parent_type` field. If a parent document matches the search, the query returns +its child documents. + +`score`:: ++ +-- +(Optional, boolean) Indicates whether the <> of a matching parent document is aggregated into its child documents. +Defaults to `false`. -When set to `true` the `ignore_unmapped` option will ignore an unmapped `type` -and will not match any documents for this query. This can be useful when -querying multiple indexes which might have different mappings. When set to -`false` (the default value) the query will throw an exception if the `type` -is not mapped. +If `false`, {es} ignores the relevance score of the parent document. {es} also +assigns each child document a relevance score equal to the `query`'s `boost`, +which defaults to `1`. -[float] -==== Sorting +If `true`, the relevance score of the matching parent document is aggregated +into its child documents' relevance scores. +-- -Child documents can't be sorted by fields in matching parent documents via the -regular sort options. If you need to sort child documents by field in the parent -documents then you should use the `function_score` query and then just sort -by `_score`. +`ignore_unmapped`:: ++ +-- +(Optional, boolean) Indicates whether to ignore an unmapped `parent_type` and +not return any documents instead of an error. Defaults to `false`. -Sorting tags by parent document' `view_count` field: +If `false`, {es} returns an error if the `parent_type` is unmapped. + +You can use this parameter to query multiple indices that may not contain the +`parent_type`. +-- + +[[has-parent-query-notes]] +==== Notes + +[[has-parent-query-performance]] +===== Sorting +You cannot sort the results of a `has_parent` query using standard +<>. + +If you need to sort returned documents by a field in their parent documents, use +a `function_score` query and sort by `_score`. For example, the following query +sorts returned documents by the `view_count` field of their parent documents. 
[source,js] --------------------------------------------------- +---- GET /_search { "query": { "has_parent" : { - "parent_type" : "blog", + "parent_type" : "parent", "score" : true, "query" : { "function_score" : { @@ -99,5 +138,5 @@ GET /_search } } } --------------------------------------------------- -// CONSOLE +---- +// CONSOLE \ No newline at end of file diff --git a/docs/reference/query-dsl/ids-query.asciidoc b/docs/reference/query-dsl/ids-query.asciidoc index 43de8cb7332d3..afbd663506147 100644 --- a/docs/reference/query-dsl/ids-query.asciidoc +++ b/docs/reference/query-dsl/ids-query.asciidoc @@ -1,5 +1,9 @@ [[query-dsl-ids-query]] -=== Ids Query +=== IDs query +++++ +IDs +++++ + Returns documents based on their IDs. This query uses document IDs stored in the <> field. diff --git a/docs/reference/query-dsl/intervals-query.asciidoc b/docs/reference/query-dsl/intervals-query.asciidoc index 7353ca137f3e1..6581f3eff3a29 100644 --- a/docs/reference/query-dsl/intervals-query.asciidoc +++ b/docs/reference/query-dsl/intervals-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-intervals-query]] === Intervals query +++++ +Intervals +++++ An `intervals` query allows fine-grained control over the order and proximity of matching terms. Matching rules are constructed from a small set of definitions, diff --git a/docs/reference/query-dsl/joining-queries.asciidoc b/docs/reference/query-dsl/joining-queries.asciidoc index e40b8655066e5..69fcca8690079 100644 --- a/docs/reference/query-dsl/joining-queries.asciidoc +++ b/docs/reference/query-dsl/joining-queries.asciidoc @@ -6,13 +6,11 @@ prohibitively expensive. Instead, Elasticsearch offers two forms of join which are designed to scale horizontally. <>:: - Documents may contain fields of type <>. These fields are used to index arrays of objects, where each object can be queried (with the `nested` query) as an independent document. <> and <> queries:: - A <> can exist between documents within a single index. The `has_child` query returns parent documents whose child documents match the specified query, while the diff --git a/docs/reference/query-dsl/match-all-query.asciidoc b/docs/reference/query-dsl/match-all-query.asciidoc index 6e44882867624..31d4f64aef3b2 100644 --- a/docs/reference/query-dsl/match-all-query.asciidoc +++ b/docs/reference/query-dsl/match-all-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-match-all-query]] -== Match All Query +== Match all query +++++ +Match all +++++ The most simple query, which matches all documents, giving them all a `_score` of `1.0`. diff --git a/docs/reference/query-dsl/match-bool-prefix-query.asciidoc b/docs/reference/query-dsl/match-bool-prefix-query.asciidoc index 623f2423d8055..36699bed81ad3 100644 --- a/docs/reference/query-dsl/match-bool-prefix-query.asciidoc +++ b/docs/reference/query-dsl/match-bool-prefix-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-match-bool-prefix-query]] -=== Match Bool Prefix Query +=== Match boolean prefix query +++++ +Match boolean prefix +++++ A `match_bool_prefix` query analyzes its input and constructs a <> from the terms. 
Each term except the last diff --git a/docs/reference/query-dsl/match-phrase-prefix-query.asciidoc b/docs/reference/query-dsl/match-phrase-prefix-query.asciidoc index 304eaf9a5b4f0..72fc506016ac5 100644 --- a/docs/reference/query-dsl/match-phrase-prefix-query.asciidoc +++ b/docs/reference/query-dsl/match-phrase-prefix-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-match-query-phrase-prefix]] -=== Match Phrase Prefix Query +=== Match phrase prefix query +++++ +Match phrase prefix +++++ The `match_phrase_prefix` is the same as `match_phrase`, except that it allows for prefix matches on the last term in the text. For example: @@ -58,7 +61,7 @@ the user will continue to type more letters until the word they are looking for appears. For better solutions for _search-as-you-type_ see the -<> and +<> and the <>. =================================================== diff --git a/docs/reference/query-dsl/match-phrase-query.asciidoc b/docs/reference/query-dsl/match-phrase-query.asciidoc index 1f4b19eedc132..ed847c419af60 100644 --- a/docs/reference/query-dsl/match-phrase-query.asciidoc +++ b/docs/reference/query-dsl/match-phrase-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-match-query-phrase]] -=== Match Phrase Query +=== Match phrase query +++++ +Match phrase +++++ The `match_phrase` query analyzes the text and creates a `phrase` query out of the analyzed text. For example: diff --git a/docs/reference/query-dsl/match-query.asciidoc b/docs/reference/query-dsl/match-query.asciidoc index 4fcb40a76ec9c..68886d4736067 100644 --- a/docs/reference/query-dsl/match-query.asciidoc +++ b/docs/reference/query-dsl/match-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-match-query]] -=== Match Query +=== Match query +++++ +Match +++++ `match` queries accept text/numerics/dates, analyzes diff --git a/docs/reference/query-dsl/minimum-should-match.asciidoc b/docs/reference/query-dsl/minimum-should-match.asciidoc index d6395da95e498..e0610ca42407e 100644 --- a/docs/reference/query-dsl/minimum-should-match.asciidoc +++ b/docs/reference/query-dsl/minimum-should-match.asciidoc @@ -1,5 +1,5 @@ [[query-dsl-minimum-should-match]] -== Minimum Should Match +== `minimum_should_match` parameter The `minimum_should_match` parameter possible values: diff --git a/docs/reference/query-dsl/mlt-query.asciidoc b/docs/reference/query-dsl/mlt-query.asciidoc index e59cd24292d48..1d9de562083a8 100644 --- a/docs/reference/query-dsl/mlt-query.asciidoc +++ b/docs/reference/query-dsl/mlt-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-mlt-query]] -=== More Like This Query +=== More like this query +++++ +More like this +++++ The More Like This Query finds documents that are "like" a given set of documents. 
In order to do so, MLT selects a set of representative terms diff --git a/docs/reference/query-dsl/multi-match-query.asciidoc b/docs/reference/query-dsl/multi-match-query.asciidoc index e7bcd799df702..7708a6e4e1d24 100644 --- a/docs/reference/query-dsl/multi-match-query.asciidoc +++ b/docs/reference/query-dsl/multi-match-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-multi-match-query]] -=== Multi Match Query +=== Multi-match query +++++ +Multi-match +++++ The `multi_match` query builds on the <> to allow multi-field queries: diff --git a/docs/reference/query-dsl/multi-term-rewrite.asciidoc b/docs/reference/query-dsl/multi-term-rewrite.asciidoc index 391b42ea00791..dbac78b5c3038 100644 --- a/docs/reference/query-dsl/multi-term-rewrite.asciidoc +++ b/docs/reference/query-dsl/multi-term-rewrite.asciidoc @@ -1,5 +1,5 @@ [[query-dsl-multi-term-rewrite]] -== `rewrite` Parameter +== `rewrite` parameter WARNING: This parameter is for expert users only. Changing the value of this parameter can impact search performance and relevance. diff --git a/docs/reference/query-dsl/nested-query.asciidoc b/docs/reference/query-dsl/nested-query.asciidoc index c58d68b73cff1..72048c9aee5f5 100644 --- a/docs/reference/query-dsl/nested-query.asciidoc +++ b/docs/reference/query-dsl/nested-query.asciidoc @@ -1,15 +1,26 @@ [[query-dsl-nested-query]] -=== Nested Query +=== Nested query +++++ +Nested +++++ -Nested query allows to query nested objects / docs (see -<>). The -query is executed against the nested objects / docs as if they were -indexed as separate docs (they are, internally) and resulting in the -root parent doc (or parent nested mapping). Here is a sample mapping we -will work with: +Wraps another query to search <> fields. + +The `nested` query searches nested field objects as if they were indexed as +separate documents. If an object matches the search, the `nested` query returns +the root parent document. + +[[nested-query-ex-request]] +==== Example request + +[[nested-query-index-setup]] +===== Index setup + +To use the `nested` query, your index must include a <> field +mapping. For example: [source,js] --------------------------------------------------- +---- PUT /my_index { "mappings": { @@ -21,20 +32,20 @@ PUT /my_index } } --------------------------------------------------- +---- // CONSOLE // TESTSETUP -And here is a sample nested query usage: +[[nested-query-ex-query]] +===== Example query [source,js] --------------------------------------------------- -GET /_search +---- +GET /my_index/_search { "query": { "nested" : { "path" : "obj1", - "score_mode" : "avg", "query" : { "bool" : { "must" : [ @@ -42,29 +53,65 @@ GET /_search { "range" : {"obj1.count" : {"gt" : 5}} } ] } - } + }, + "score_mode" : "avg" } } } --------------------------------------------------- +---- // CONSOLE -The query `path` points to the nested object path, and the `query` -includes the query that will run on the nested docs matching the -direct path, and joining with the root parent docs. Note that any -fields referenced inside the query must use the complete path (fully -qualified). - -The `score_mode` allows to set how inner children matching affects -scoring of parent. It defaults to `avg`, but can be `sum`, `min`, -`max` and `none`. - -There is also an `ignore_unmapped` option which, when set to `true` will -ignore an unmapped `path` and will not match any documents for this query. -This can be useful when querying multiple indexes which might have different -mappings. 
When set to `false` (the default value) the query will throw an -exception if the `path` is not mapped. - -Multi level nesting is automatically supported, and detected, resulting -in an inner nested query to automatically match the relevant nesting -level (and not root) if it exists within another nested query. +[[nested-top-level-params]] +==== Top-level parameters for `nested` + +`path`:: +(Required, string) Path to the nested object you wish to search. + +`query`:: ++ +-- +(Required, query object) Query you wish to run on nested objects in the `path`. If an +object matches the search, the `nested` query returns the root parent document. + +You can search nested fields using dot notation that includes the complete path, +such as `obj1.name`. + +Multi-level nesting is automatically supported and detected: if a `nested` +query exists within another `nested` query, it automatically matches the +relevant nesting level, rather than the root. +-- + +`score_mode`:: ++ +-- +(Optional, string) Indicates how scores for matching child objects affect the root +parent document's <>. Valid values are: + +`avg` (Default):: +Use the mean relevance score of all matching child objects. + +`max`:: +Use the highest relevance score of all matching child objects. + +`min`:: +Use the lowest relevance score of all matching child objects. + +`none`:: +Do not use the relevance scores of matching child objects. The query assigns +parent documents a score of `0`. + +`sum`:: +Add together the relevance scores of all matching child objects. +-- + +`ignore_unmapped`:: ++ +-- +(Optional, boolean) Indicates whether to ignore an unmapped `path` and not return any +documents instead of an error. Defaults to `false`. + +If `false`, {es} returns an error if the `path` is an unmapped field. + +You can use this parameter to query multiple indices that may not contain the +field `path`. +-- \ No newline at end of file diff --git a/docs/reference/query-dsl/parent-id-query.asciidoc b/docs/reference/query-dsl/parent-id-query.asciidoc index aa2074e7d1b7e..3add028c5c19c 100644 --- a/docs/reference/query-dsl/parent-id-query.asciidoc +++ b/docs/reference/query-dsl/parent-id-query.asciidoc @@ -1,68 +1,116 @@ [[query-dsl-parent-id-query]] -=== Parent Id Query +=== Parent ID query +++++ +Parent ID +++++ -The `parent_id` query can be used to find child documents which belong to a particular parent. -Given the following mapping definition: +Returns child documents <> to a specific parent document. +You can use a <> field mapping to create parent-child +relationships between documents in the same index. +[[parent-id-query-ex-request]] +==== Example request + +[[parent-id-index-setup]] +===== Index setup +To use the `parent_id` query, your index must include a <> +field mapping. To see how you can set up an index for the `parent_id` query, try +the following example. + +. Create an index with a <> field mapping. ++ +-- [source,js] --------------------------------------------- -PUT my_index +---- +PUT /my-index { - "mappings": { - "properties": { - "my_join_field": { - "type": "join", - "relations": { - "my_parent": "my_child" + "mappings": { + "properties" : { + "my-join-field" : { + "type" : "join", + "relations": { + "my-parent": "my-child" + } + } } - } } - } } -PUT my_index/_doc/1?refresh +---- +// CONSOLE +// TESTSETUP +-- + +. Index a parent document with an ID of `1`. 
++ +-- +[source,js] +---- +PUT /my-index/_doc/1?refresh { - "text": "This is a parent document", - "my_join_field": "my_parent" + "text": "This is a parent document.", + "my-join-field": "my-parent" } +---- +// CONSOLE +-- -PUT my_index/_doc/2?routing=1&refresh +. Index a child document of the parent document. ++ +-- +[source,js] +---- +PUT /my-index/_doc/2?routing=1&refresh { - "text": "This is a child document", + "text": "This is a child document.", - "my_join_field": { + "my-join-field": { - "name": "my_child", + "name": "my-child", "parent": "1" } } - --------------------------------------------- +---- // CONSOLE -// TESTSETUP +-- + +[[parent-id-query-ex-query]] +===== Example query + +The following search returns child documents for a parent document with an ID of +`1`. [source,js] --------------------------------------------------- -GET /my_index/_search +---- +GET /my-index/_search { "query": { - "parent_id": { - "type": "my_child", - "id": "1" - } + "parent_id": { + "type": "my-child", + "id": "1" + } } } --------------------------------------------------- +---- // CONSOLE +[[parent-id-top-level-params]] +==== Top-level parameters for `parent_id` + +`type`:: +(Required, string) Name of the child relationship mapped for the +<> field. -==== Parameters +`id`:: +(Required, string) ID of the parent document. The query will return child +documents of this parent document. -This query has two required parameters: +`ignore_unmapped`:: ++ +-- +(Optional, boolean) Indicates whether to ignore an unmapped `type` and not +return any documents instead of an error. Defaults to `false`. -[horizontal] -`type`:: The **child** type name, as specified in the <>. -`id`:: The ID of the parent document. +If `false`, {es} returns an error if the `type` is unmapped. -`ignore_unmapped`:: When set to `true` this will ignore an unmapped `type` and will not match any -documents for this query. This can be useful when querying multiple indexes -which might have different mappings. When set to `false` (the default value) -the query will throw an exception if the `type` is not mapped. +You can use this parameter to query multiple indices that may not contain the +`type`. +-- diff --git a/docs/reference/query-dsl/percolate-query.asciidoc b/docs/reference/query-dsl/percolate-query.asciidoc index 6444bdb743e65..314c30bfc1a9b 100644 --- a/docs/reference/query-dsl/percolate-query.asciidoc +++ b/docs/reference/query-dsl/percolate-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-percolate-query]] -=== Percolate Query +=== Percolate query +++++ +Percolate +++++ The `percolate` query can be used to match queries stored in an index. The `percolate` query itself diff --git a/docs/reference/query-dsl/prefix-query.asciidoc b/docs/reference/query-dsl/prefix-query.asciidoc index 54d69583e990c..a41abc35b5343 100644 --- a/docs/reference/query-dsl/prefix-query.asciidoc +++ b/docs/reference/query-dsl/prefix-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-prefix-query]] -=== Prefix Query +=== Prefix query +++++ +Prefix +++++ Matches documents that have fields containing terms with a specified prefix (*not analyzed*). The prefix query maps to Lucene `PrefixQuery`. 
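As a minimal sketch of the prefix matching described above (the `user` field and the `ki` value are illustrative, not taken from this diff), a prefix query can be as small as: [source,js] ---- GET /_search { "query": { "prefix" : { "user" : "ki" } } } ---- // CONSOLE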
diff --git a/docs/reference/query-dsl/query-string-query.asciidoc b/docs/reference/query-dsl/query-string-query.asciidoc index c293bf5457b9c..967dd906eec3f 100644 --- a/docs/reference/query-dsl/query-string-query.asciidoc +++ b/docs/reference/query-dsl/query-string-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-query-string-query]] -=== Query String Query +=== Query string query +++++ +Query string +++++ A query that uses a query parser in order to parse its content. Here is an example: diff --git a/docs/reference/query-dsl/query_filter_context.asciidoc b/docs/reference/query-dsl/query_filter_context.asciidoc index 6e7177ed19c18..c7065948a50d5 100644 --- a/docs/reference/query-dsl/query_filter_context.asciidoc +++ b/docs/reference/query-dsl/query_filter_context.asciidoc @@ -13,7 +13,7 @@ document matches, the query clause also calculates a `_score` representing how well the document matches, relative to other documents. Query context is in effect whenever a query clause is passed to a `query` parameter, -such as the `query` parameter in the <> API. +such as the `query` parameter in the <> API. -- Filter context:: diff --git a/docs/reference/query-dsl/range-query.asciidoc b/docs/reference/query-dsl/range-query.asciidoc index 27db882fe1dd3..0cbc1895cd7f7 100644 --- a/docs/reference/query-dsl/range-query.asciidoc +++ b/docs/reference/query-dsl/range-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-range-query]] -=== Range Query +=== Range query +++++ +Range +++++ Returns documents that contain terms within a provided range. diff --git a/docs/reference/query-dsl/rank-feature-query.asciidoc b/docs/reference/query-dsl/rank-feature-query.asciidoc index fe23c5f3ec26f..18e4562a90adb 100644 --- a/docs/reference/query-dsl/rank-feature-query.asciidoc +++ b/docs/reference/query-dsl/rank-feature-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-rank-feature-query]] -=== Rank Feature Query +=== Rank feature query +++++ +Rank feature +++++ The `rank_feature` query is a specialized query that only works on <> fields and <> fields. diff --git a/docs/reference/query-dsl/regexp-query.asciidoc b/docs/reference/query-dsl/regexp-query.asciidoc index b50b2ca8f5055..1df4107f6ef7f 100644 --- a/docs/reference/query-dsl/regexp-query.asciidoc +++ b/docs/reference/query-dsl/regexp-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-regexp-query]] -=== Regexp Query +=== Regexp query +++++ +Regexp +++++ The `regexp` query allows you to use regular expression term queries. See <> for details of the supported regular expression language. diff --git a/docs/reference/query-dsl/script-query.asciidoc b/docs/reference/query-dsl/script-query.asciidoc index 917991e3211c6..e8c8f3b7cc1ae 100644 --- a/docs/reference/query-dsl/script-query.asciidoc +++ b/docs/reference/query-dsl/script-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-script-query]] -=== Script Query +=== Script query +++++ +Script +++++ A query allowing to define <> as queries. They are typically used in a filter diff --git a/docs/reference/query-dsl/script-score-query.asciidoc b/docs/reference/query-dsl/script-score-query.asciidoc index 6b7411667f286..571bf2d062979 100644 --- a/docs/reference/query-dsl/script-score-query.asciidoc +++ b/docs/reference/query-dsl/script-score-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-script-score-query]] -=== Script Score Query +=== Script score query +++++ +Script score +++++ The `script_score` allows you to modify the score of documents that are retrieved by a query. 
This can be useful if, for example, a score @@ -11,7 +14,6 @@ a function to be used to compute a new score for each document returned by the query. For more information on scripting see <>. - Here is an example of using `script_score` to assign each matched document a score equal to the number of likes divided by 10: @@ -32,7 +34,6 @@ GET /_search } -------------------------------------------------- // CONSOLE -// TEST[setup:twitter] NOTE: The values returned from `script_score` cannot be negative. In general, Lucene requires the scores produced by queries to be non-negative in order to @@ -76,140 +77,6 @@ to be the most efficient by using the internal mechanisms. -------------------------------------------------- // NOTCONSOLE -[role="xpack"] -[testenv="basic"] -[[vector-functions]] -===== Functions for vector fields - -experimental[] - -These functions are used for -for <> and -<> fields. - -NOTE: During vector functions' calculation, all matched documents are -linearly scanned. Thus, expect the query time grow linearly -with the number of matched documents. For this reason, we recommend -to limit the number of matched documents with a `query` parameter. - -For dense_vector fields, `cosineSimilarity` calculates the measure of -cosine similarity between a given query vector and document vectors. - -[source,js] --------------------------------------------------- -{ - "query": { - "script_score": { - "query": { - "match_all": {} - }, - "script": { - "source": "cosineSimilarity(params.query_vector, doc['my_dense_vector']) + 1.0", <1> - "params": { - "query_vector": [4, 3.4, -0.2] <2> - } - } - } - } -} --------------------------------------------------- -// NOTCONSOLE -<1> The script adds 1.0 to the cosine similarity to prevent the score from being negative. -<2> To take advantage of the script optimizations, provide a query vector as a script parameter. - -Similarly, for sparse_vector fields, `cosineSimilaritySparse` calculates cosine similarity -between a given query vector and document vectors. - -[source,js] --------------------------------------------------- -{ - "query": { - "script_score": { - "query": { - "match_all": {} - }, - "script": { - "source": "cosineSimilaritySparse(params.query_vector, doc['my_sparse_vector']) + 1.0", - "params": { - "query_vector": {"2": 0.5, "10" : 111.3, "50": -1.3, "113": 14.8, "4545": 156.0} - } - } - } - } -} --------------------------------------------------- -// NOTCONSOLE - -For dense_vector fields, `dotProduct` calculates the measure of -dot product between a given query vector and document vectors. - -[source,js] --------------------------------------------------- -{ - "query": { - "script_score": { - "query": { - "match_all": {} - }, - "script": { - "source": """ - double value = dotProduct(params.query_vector, doc['my_vector']); - return sigmoid(1, Math.E, -value); <1> - """, - "params": { - "query_vector": [4, 3.4, -0.2] - } - } - } - } -} --------------------------------------------------- -// NOTCONSOLE - -<1> Using the standard sigmoid function prevents scores from being negative. - -Similarly, for sparse_vector fields, `dotProductSparse` calculates dot product -between a given query vector and document vectors. 
- -[source,js] -------------------------------------------------- -{ - "query": { - "script_score": { - "query": { - "match_all": {} - }, - "script": { - "source": """ - double value = dotProductSparse(params.query_vector, doc['my_sparse_vector']); - return sigmoid(1, Math.E, -value); - """, - "params": { - "query_vector": {"2": 0.5, "10" : 111.3, "50": -1.3, "113": 14.8, "4545": 156.0} - } - } - } - } -} -------------------------------------------------- -// NOTCONSOLE - -NOTE: If a document doesn't have a value for a vector field on which -a vector function is executed, an error will be thrown. - -You can check if a document has a value for the field `my_vector` by -`doc['my_vector'].size() == 0`. Your overall script can look like this: - -[source,js] -------------------------------------------------- -"source": "doc['my_vector'].size() == 0 ? 0 : cosineSimilarity(params.queryVector, doc['my_vector'])" -------------------------------------------------- -// NOTCONSOLE - -NOTE: If a document's dense vector field has a number of dimensions -different from the query's vector, an error will be thrown. - - [[random-score-function]] ===== Random score function `random_score` function generates scores that are uniformly distributed @@ -323,6 +190,9 @@ You can read more about decay functions NOTE: Decay functions on dates are limited to dates in the default format and default time zone. Also calculations with `now` are not supported. +===== Functions for vector fields +<> are accessible through the +`script_score` query. ==== Faster alternatives Script Score Query calculates the score for every hit (matching document). @@ -422,5 +292,4 @@ through a script: Script Score query has equivalent <> that can be used in script. - - +include::{es-repo-dir}/vectors/vector-functions.asciidoc[] diff --git a/docs/reference/query-dsl/simple-query-string-query.asciidoc b/docs/reference/query-dsl/simple-query-string-query.asciidoc index 113db56072df6..cb8e302e259be 100644 --- a/docs/reference/query-dsl/simple-query-string-query.asciidoc +++ b/docs/reference/query-dsl/simple-query-string-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-simple-query-string-query]] -=== Simple Query String Query +=== Simple query string query +++++ +Simple query string +++++ A query that uses the SimpleQueryParser to parse its context. Unlike the regular `query_string` query, the `simple_query_string` query will never diff --git a/docs/reference/query-dsl/span-containing-query.asciidoc b/docs/reference/query-dsl/span-containing-query.asciidoc index 638c699923305..7b5fb0ba7aeae 100644 --- a/docs/reference/query-dsl/span-containing-query.asciidoc +++ b/docs/reference/query-dsl/span-containing-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-span-containing-query]] -=== Span Containing Query +=== Span containing query +++++ +Span containing +++++ Returns matches which enclose another span query. The span containing query maps to Lucene `SpanContainingQuery`. Here is an example: diff --git a/docs/reference/query-dsl/span-field-masking-query.asciidoc b/docs/reference/query-dsl/span-field-masking-query.asciidoc index d9e96635a29c4..67ef67e595187 100644 --- a/docs/reference/query-dsl/span-field-masking-query.asciidoc +++ b/docs/reference/query-dsl/span-field-masking-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-span-field-masking-query]] -=== Span Field Masking Query +=== Span field masking query +++++ +Span field masking +++++ Wrapper to allow span queries to participate in composite single-field span queries by 'lying' about their search field. 
The span field masking query maps to Lucene's `SpanFieldMaskingQuery` diff --git a/docs/reference/query-dsl/span-first-query.asciidoc b/docs/reference/query-dsl/span-first-query.asciidoc index dba7932661deb..a5d23071f796a 100644 --- a/docs/reference/query-dsl/span-first-query.asciidoc +++ b/docs/reference/query-dsl/span-first-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-span-first-query]] -=== Span First Query +=== Span first query +++++ +Span first +++++ Matches spans near the beginning of a field. The span first query maps to Lucene `SpanFirstQuery`. Here is an example: diff --git a/docs/reference/query-dsl/span-multi-term-query.asciidoc b/docs/reference/query-dsl/span-multi-term-query.asciidoc index f79283b7fa4f9..c645d45e237dd 100644 --- a/docs/reference/query-dsl/span-multi-term-query.asciidoc +++ b/docs/reference/query-dsl/span-multi-term-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-span-multi-term-query]] -=== Span Multi Term Query +=== Span multi-term query +++++ +Span multi-term +++++ The `span_multi` query allows you to wrap a `multi term query` (one of wildcard, fuzzy, prefix, range or regexp query) as a `span query`, so diff --git a/docs/reference/query-dsl/span-near-query.asciidoc b/docs/reference/query-dsl/span-near-query.asciidoc index e69be783e3d60..acb94a318815b 100644 --- a/docs/reference/query-dsl/span-near-query.asciidoc +++ b/docs/reference/query-dsl/span-near-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-span-near-query]] -=== Span Near Query +=== Span near query +++++ +Span near +++++ Matches spans which are near one another. One can specify _slop_, the maximum number of intervening unmatched positions, as well as whether diff --git a/docs/reference/query-dsl/span-not-query.asciidoc b/docs/reference/query-dsl/span-not-query.asciidoc index 29a803a74767b..561f4eb1eb1ed 100644 --- a/docs/reference/query-dsl/span-not-query.asciidoc +++ b/docs/reference/query-dsl/span-not-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-span-not-query]] -=== Span Not Query +=== Span not query +++++ +Span not +++++ Removes matches which overlap with another span query or which are within x tokens before (controlled by the parameter `pre`) or y tokens diff --git a/docs/reference/query-dsl/span-or-query.asciidoc b/docs/reference/query-dsl/span-or-query.asciidoc index 470935d6f5c4a..e1c0c9263a80f 100644 --- a/docs/reference/query-dsl/span-or-query.asciidoc +++ b/docs/reference/query-dsl/span-or-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-span-or-query]] -=== Span Or Query +=== Span or query +++++ +Span or +++++ Matches the union of its span clauses. The span or query maps to Lucene `SpanOrQuery`. Here is an example: diff --git a/docs/reference/query-dsl/span-queries.asciidoc b/docs/reference/query-dsl/span-queries.asciidoc index 7dc65433432ec..cc14b0ee4935a 100644 --- a/docs/reference/query-dsl/span-queries.asciidoc +++ b/docs/reference/query-dsl/span-queries.asciidoc @@ -15,62 +15,55 @@ Span queries cannot be mixed with non-span queries (with the exception of the `s The queries in this group are: -<>:: +<>:: +Accepts a list of span queries, but only returns those spans which also match a second span query. -The equivalent of the <> but for use with -other span queries. +<>:: +Allows queries like `span-near` or `span-or` across different fields. -<>:: +<>:: +Accepts another span query whose matches must appear within the first N +positions of the field. +<>:: Wraps a <>, <>, <>, <>, <>, or <> query. -<>:: - -Accepts another span query whose matches must appear within the first N -positions of the field. 
- <>:: - Accepts multiple span queries whose matches must be within the specified distance of each other, and possibly in the same order. -<>:: +<>:: +Wraps another span query, and excludes any documents which match that query. +<>:: Combines multiple span queries -- returns documents which match any of the specified queries. -<>:: - -Wraps another span query, and excludes any documents which match that query. - -<>:: +<>:: -Accepts a list of span queries, but only returns those spans which also match a second span query. +The equivalent of the <> but for use with +other span queries. <>:: - -The result from a single span query is returned as long is its span falls within the spans returned by a list of other span queries. +The result from a single span query is returned as long as its span falls within the spans returned by a list of other span queries. -<>:: - -Allows queries like `span-near` or `span-or` across different fields. -include::span-term-query.asciidoc[] +include::span-containing-query.asciidoc[] -include::span-multi-term-query.asciidoc[] +include::span-field-masking-query.asciidoc[] include::span-first-query.asciidoc[] -include::span-near-query.asciidoc[] +include::span-multi-term-query.asciidoc[] -include::span-or-query.asciidoc[] +include::span-near-query.asciidoc[] include::span-not-query.asciidoc[] -include::span-containing-query.asciidoc[] +include::span-or-query.asciidoc[] -include::span-within-query.asciidoc[] +include::span-term-query.asciidoc[] -include::span-field-masking-query.asciidoc[] +include::span-within-query.asciidoc[] \ No newline at end of file diff --git a/docs/reference/query-dsl/span-term-query.asciidoc b/docs/reference/query-dsl/span-term-query.asciidoc index 1b12a3c35f796..ba31b471ef2ae 100644 --- a/docs/reference/query-dsl/span-term-query.asciidoc +++ b/docs/reference/query-dsl/span-term-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-span-term-query]] -=== Span Term Query +=== Span term query +++++ +Span term +++++ Matches spans containing a term. The span term query maps to Lucene `SpanTermQuery`. Here is an example: diff --git a/docs/reference/query-dsl/span-within-query.asciidoc b/docs/reference/query-dsl/span-within-query.asciidoc index b70835c4134b4..f344f32b21854 100644 --- a/docs/reference/query-dsl/span-within-query.asciidoc +++ b/docs/reference/query-dsl/span-within-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-span-within-query]] -=== Span Within Query +=== Span within query +++++ +Span within +++++ Returns matches which are enclosed inside another span query. The span within query maps to Lucene `SpanWithinQuery`. Here is an example: diff --git a/docs/reference/query-dsl/special-queries.asciidoc b/docs/reference/query-dsl/special-queries.asciidoc index b7275ac2cee3f..c2e95a4d818cf 100644 --- a/docs/reference/query-dsl/special-queries.asciidoc +++ b/docs/reference/query-dsl/special-queries.asciidoc @@ -4,50 +4,44 @@ This group contains queries which do not fit into the other groups: -<>:: +<>:: +A query that computes scores based on the dynamically computed distances +between the origin and documents' date, date_nanos and geo_point fields. +It is able to efficiently skip non-competitive hits. +<>:: This query finds documents which are similar to the specified text, document, or collection of documents. -<>:: - -This query allows a script to act as a filter. Also see the -<>. - -<>:: - -A query that allows to modify the score of a sub-query with a script. - <>:: - This query finds queries that are stored as documents that match with the specified document. 
<>:: - A query that computes scores based on the values of numeric features and is able to efficiently skip non-competitive hits. -<>:: +<>:: +This query allows a script to act as a filter. Also see the +<>. -A query that computes scores based on the dynamically computed distances -between the origin and documents' date, date_nanos and geo_point fields. -It is able to efficiently skip non-competitive hits. +<>:: +A query that allows you to modify the score of a sub-query with a script. <>:: - A query that accepts other queries as json or yaml string. -include::mlt-query.asciidoc[] -include::script-query.asciidoc[] +include::distance-feature-query.asciidoc[] -include::script-score-query.asciidoc[] +include::mlt-query.asciidoc[] include::percolate-query.asciidoc[] include::rank-feature-query.asciidoc[] -include::distance-feature-query.asciidoc[] +include::script-query.asciidoc[] + +include::script-score-query.asciidoc[] -include::wrapper-query.asciidoc[] +include::wrapper-query.asciidoc[] \ No newline at end of file diff --git a/docs/reference/query-dsl/term-level-queries.asciidoc b/docs/reference/query-dsl/term-level-queries.asciidoc index 5941aea76c2fa..fd3f570916270 100644 --- a/docs/reference/query-dsl/term-level-queries.asciidoc +++ b/docs/reference/query-dsl/term-level-queries.asciidoc @@ -20,58 +20,58 @@ Term-level queries still normalize search terms for `keyword` fields with the [[term-level-query-types]] === Types of term-level queries -<>:: -Returns documents that contain an exact term in a provided field. - -<>:: -Returns documents that contain one or more exact terms in a provided field. - -<>:: -Returns documents that contain a minimum number of exact terms in a provided -field. You can define the minimum number of matching terms using a field or -script. - -<>:: -Returns documents that contain terms within a provided range. - <>:: Returns documents that contain any indexed value for a field. +<>:: +Returns documents that contain terms similar to the search term. {es} measures +similarity, or fuzziness, using a +http://en.wikipedia.org/wiki/Levenshtein_distance[Levenshtein edit distance]. + +<>:: +Returns documents based on their <>. + <>:: Returns documents that contain a specific prefix in a provided field. -<>:: -Returns documents that contain terms matching a wildcard pattern. +<>:: +Returns documents that contain terms within a provided range. <>:: Returns documents that contain terms matching a https://en.wikipedia.org/wiki/Regular_expression[regular expression]. -<>:: -Returns documents that contain terms similar to the search term. {es} measures -similarity, or fuzziness, using a -http://en.wikipedia.org/wiki/Levenshtein_distance[Levenshtein edit distance]. +<>:: +Returns documents that contain an exact term in a provided field (see the +example after this list). -<>:: -Returns documents based on their <>. +<>:: +Returns documents that contain one or more exact terms in a provided field. +<>:: +Returns documents that contain a minimum number of exact terms in a provided +field. You can define the minimum number of matching terms using a field or +script. -include::term-query.asciidoc[] +<>:: +Returns documents that contain terms matching a wildcard pattern. 
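As the example referenced in the list above — a minimal sketch of exact-term matching, with the `user` field and `kimchy` value invented for illustration rather than taken from this diff — a term query looks like: [source,js] ---- GET /_search { "query": { "term" : { "user" : "kimchy" } } } ---- // CONSOLE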
-include::terms-query.asciidoc[] -include::terms-set-query.asciidoc[] +include::exists-query.asciidoc[] -include::range-query.asciidoc[] +include::fuzzy-query.asciidoc[] -include::exists-query.asciidoc[] +include::ids-query.asciidoc[] include::prefix-query.asciidoc[] -include::wildcard-query.asciidoc[] +include::range-query.asciidoc[] include::regexp-query.asciidoc[] -include::fuzzy-query.asciidoc[] +include::term-query.asciidoc[] -include::ids-query.asciidoc[] +include::terms-query.asciidoc[] + +include::terms-set-query.asciidoc[] + +include::wildcard-query.asciidoc[] \ No newline at end of file diff --git a/docs/reference/query-dsl/term-query.asciidoc b/docs/reference/query-dsl/term-query.asciidoc index 25da70d0cf1e1..bb87d9a905d48 100644 --- a/docs/reference/query-dsl/term-query.asciidoc +++ b/docs/reference/query-dsl/term-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-term-query]] -=== Term Query +=== Term query +++++ +Term +++++ Returns documents that contain an *exact* term in a provided field. diff --git a/docs/reference/query-dsl/terms-query.asciidoc b/docs/reference/query-dsl/terms-query.asciidoc index 53ae0163f804f..79b2dcb1afed1 100644 --- a/docs/reference/query-dsl/terms-query.asciidoc +++ b/docs/reference/query-dsl/terms-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-terms-query]] -=== Terms Query +=== Terms query +++++ +Terms +++++ Returns documents that contain one or more *exact* terms in a provided field. @@ -66,7 +69,7 @@ increases the relevance score. [[query-dsl-terms-query-highlighting]] ===== Highlighting `terms` queries -<> is best-effort only. {es} may not +<> is best-effort only. {es} may not return highlight results for `terms` queries depending on: * Highlighter type diff --git a/docs/reference/query-dsl/terms-set-query.asciidoc b/docs/reference/query-dsl/terms-set-query.asciidoc index 0f097e494bfda..0dba96241f82a 100644 --- a/docs/reference/query-dsl/terms-set-query.asciidoc +++ b/docs/reference/query-dsl/terms-set-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-terms-set-query]] -=== Terms Set Query +=== Terms set query +++++ +Terms set +++++ Returns documents that contain a minimum number of *exact* terms in a provided field. diff --git a/docs/reference/query-dsl/wildcard-query.asciidoc b/docs/reference/query-dsl/wildcard-query.asciidoc index b2e8eb0adf772..5b455ec5f529d 100644 --- a/docs/reference/query-dsl/wildcard-query.asciidoc +++ b/docs/reference/query-dsl/wildcard-query.asciidoc @@ -1,5 +1,9 @@ [[query-dsl-wildcard-query]] -=== Wildcard Query +=== Wildcard query +++++ +Wildcard +++++ + Returns documents that contain terms matching a wildcard pattern. A wildcard operator is a placeholder that matches one or more characters. For diff --git a/docs/reference/query-dsl/wrapper-query.asciidoc b/docs/reference/query-dsl/wrapper-query.asciidoc index 4ffef5bfc6bcc..010e086056d8f 100644 --- a/docs/reference/query-dsl/wrapper-query.asciidoc +++ b/docs/reference/query-dsl/wrapper-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-wrapper-query]] -=== Wrapper Query +=== Wrapper query +++++ +Wrapper +++++ A query that accepts any other query as base64 encoded string. diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index 4d15ee7e25503..96ef505e0060a 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -620,4 +620,166 @@ without any configuration, if the total number of hits is not tracked. [role="exclude",id="xpack-api"] === X-Pack APIs -{es} {xpack} APIs are now documented in <>. 
\ No newline at end of file +{es} {xpack} APIs are now documented in <>. + +[role="exclude",id="ml-calendar-resource"] +=== Calendar resources + +See <> and +{stack-ov}/ml-calendars.html[Calendars and scheduled events]. + +[role="exclude",id="ml-filter-resource"] +=== Filter resources + +See <> and +{stack-ov}/ml-rules.html[Machine learning custom rules]. + +[role="exclude",id="ml-event-resource"] +=== Scheduled event resources + +See <> and +{stack-ov}/ml-calendars.html[Calendars and scheduled events]. + +[role="exclude",id="index-apis"] +=== Index APIs +{es} index APIs are now documented in <>. + +[role="exclude",id="search-request-docvalue-fields"] +=== Doc value fields parameter for request body search API +See <>. + +[role="exclude",id="search-request-explain"] +=== Explain parameter for request body search API +See <>. + +[role="exclude",id="search-request-collapse"] +=== Collapse parameter for request body search API +See <>. + +[role="exclude",id="search-request-from-size"] +=== From and size parameters for request body search API +See <>. + +[role="exclude",id="search-request-highlighting"] +=== Highlight parameter for request body search API +See <>. + +[role="exclude",id="search-request-index-boost"] +=== Index boost parameter for request body search API +See <>. + +[role="exclude",id="search-request-inner-hits"] +=== Inner hits parameter for request body search API +See <>. + +[role="exclude",id="search-request-min-score"] +=== Minimum score parameter for request body search API +See <>. + +[role="exclude",id="search-request-named-queries-and-filters"] +=== Named query parameter for request body search API +See <>. + +[role="exclude",id="search-request-post-filter"] +=== Post filter parameter for request body search API +See <>. + +[role="exclude",id="search-request-preference"] +=== Preference parameter for request body search API +See <>. + +[role="exclude",id="search-request-query"] +=== Query parameter for request body search API +See <>. + +[role="exclude",id="search-request-rescore"] +=== Rescoring parameter for request body search API +See <>. + +[role="exclude",id="search-request-script-fields"] +=== Script fields parameter for request body search API +See <>. + +[role="exclude",id="search-request-scroll"] +=== Scroll parameter for request body search API +See <>. + +[role="exclude",id="search-request-search-after"] +=== Search after parameter for request body search API +See <>. + +[role="exclude",id="search-request-search-type"] +=== Search type parameter for request body search API +See <>. + +[role="exclude",id="search-request-seq-no-primary-term"] +=== Sequence numbers and primary terms parameter for request body search API +See <>. + +[role="exclude",id="search-request-sort"] +=== Sort parameter for request body search API +See <>. + +[role="exclude",id="search-request-source-filtering"] +=== Source filtering parameter for request body search API +See <>. + +[role="exclude",id="search-request-stored-fields"] +=== Stored fields parameter for request body search API +See <>. + +[role="exclude",id="search-request-track-total-hits"] +=== Track total hits parameter for request body search API +See <>. + +[role="exclude",id="search-request-version"] +=== Version parameter for request body search API +See <>. + +[role="exclude",id="search-suggesters-term"] +=== Term suggester +See <>. + +[role="exclude",id="search-suggesters-phrase"] +=== Phrase suggester +See <>. + +[role="exclude",id="search-suggesters-completion"] +=== Completion suggester +See <>. 
+ +[role="exclude",id="suggester-context"] +=== Context suggester +See <>. + +[role="exclude",id="returning-suggesters-type"] +=== Return suggester type +See <>. + +[role="exclude",id="search-profile-queries"] +=== Profiling queries +See <>. + +[role="exclude",id="search-profile-aggregations"] +=== Profiling aggregations +See <>. + +[role="exclude",id="search-profile-considerations"] +=== Profiling considerations +See <>. + +[role="exclude",id="_explain_analyze"] +=== Explain analyze API +See <>. + +[role="exclude",id="indices-synced-flush"] +=== Synced flush API +See <>. + +[role="exclude",id="_repositories"] +=== Snapshot repositories +See <>. + +[role="exclude",id="_snapshot"] +=== Snapshot +See <>. diff --git a/docs/reference/rest-api/defs.asciidoc b/docs/reference/rest-api/defs.asciidoc index 6e990f42e9a69..efeb6c6092305 100644 --- a/docs/reference/rest-api/defs.asciidoc +++ b/docs/reference/rest-api/defs.asciidoc @@ -5,25 +5,23 @@ These resource definitions are used in APIs related to {ml-features} and {security-features} and in {kib} advanced {ml} job configuration options. -* <> * <> * <> +* <> * <> -* <> -* <> -* <> -* <> -* <> +* <> +* <> +* <> +* <> +* <> * <> -* <> -include::{es-repo-dir}/ml/apis/calendarresource.asciidoc[] -include::{es-repo-dir}/ml/apis/datafeedresource.asciidoc[] +include::{es-repo-dir}/ml/anomaly-detection/apis/datafeedresource.asciidoc[] +include::{es-repo-dir}/ml/df-analytics/apis/dfanalyticsresources.asciidoc[] include::{es-repo-dir}/data-frames/apis/transformresource.asciidoc[] -include::{es-repo-dir}/ml/apis/filterresource.asciidoc[] -include::{es-repo-dir}/ml/apis/jobresource.asciidoc[] -include::{es-repo-dir}/ml/apis/jobcounts.asciidoc[] -include::{es-repo-dir}/ml/apis/snapshotresource.asciidoc[] +include::{es-repo-dir}/ml/df-analytics/apis/evaluateresources.asciidoc[] +include::{es-repo-dir}/ml/anomaly-detection/apis/jobresource.asciidoc[] +include::{es-repo-dir}/ml/anomaly-detection/apis/jobcounts.asciidoc[] +include::{es-repo-dir}/ml/anomaly-detection/apis/snapshotresource.asciidoc[] include::{xes-repo-dir}/rest-api/security/role-mapping-resources.asciidoc[] -include::{es-repo-dir}/ml/apis/resultsresource.asciidoc[] -include::{es-repo-dir}/ml/apis/eventresource.asciidoc[] +include::{es-repo-dir}/ml/anomaly-detection/apis/resultsresource.asciidoc[] diff --git a/docs/reference/rest-api/index.asciidoc b/docs/reference/rest-api/index.asciidoc index 8bb7053ecfe93..7be3b71860bda 100644 --- a/docs/reference/rest-api/index.asciidoc +++ b/docs/reference/rest-api/index.asciidoc @@ -10,33 +10,46 @@ directly to configure and access {es} features. We are working on including more {es} APIs in this section. Some content might not be included yet. 
-* <> +* <> +* <> +* <> * <> * <> +* <> * <> -* <> -* <> +* <> * <> +* <> * <> -* <> +* <> +* <> +* <> +* <> +* <> +* <> * <> +* <> * <> -* <> -* <> -- - -include::info.asciidoc[] +include::{es-repo-dir}/api-conventions.asciidoc[] +include::{es-repo-dir}/cat.asciidoc[] +include::{es-repo-dir}/cluster.asciidoc[] include::{es-repo-dir}/ccr/apis/ccr-apis.asciidoc[] include::{es-repo-dir}/data-frames/apis/index.asciidoc[] +include::{es-repo-dir}/docs.asciidoc[] include::{es-repo-dir}/graph/explore.asciidoc[] +include::{es-repo-dir}/indices.asciidoc[] include::{es-repo-dir}/ilm/apis/ilm-api.asciidoc[] -include::{es-repo-dir}/indices/apis/index.asciidoc[] +include::info.asciidoc[] include::{es-repo-dir}/licensing/index.asciidoc[] +include::{es-repo-dir}/ml/anomaly-detection/apis/ml-api.asciidoc[] +include::{es-repo-dir}/ml/df-analytics/apis/index.asciidoc[] include::{es-repo-dir}/migration/migration.asciidoc[] -include::{es-repo-dir}/ml/apis/ml-api.asciidoc[] +include::{es-repo-dir}/indices/apis/reload-analyzers.asciidoc[] include::{es-repo-dir}/rollup/rollup-api.asciidoc[] +include::{es-repo-dir}/search.asciidoc[] include::{xes-repo-dir}/rest-api/security.asciidoc[] +include::{es-repo-dir}/ilm/apis/slm-api.asciidoc[] include::{xes-repo-dir}/rest-api/watcher.asciidoc[] -include::{es-repo-dir}/indices/apis/reload-analyzers.asciidoc[] include::defs.asciidoc[] diff --git a/docs/reference/rest-api/info.asciidoc b/docs/reference/rest-api/info.asciidoc index adbf5f01979a0..d91be315e86e3 100644 --- a/docs/reference/rest-api/info.asciidoc +++ b/docs/reference/rest-api/info.asciidoc @@ -3,15 +3,17 @@ [[info-api]] == Info API -The info API provides general information about the installed {xpack} features. +Provides general information about the installed {xpack} features. -[float] -=== Request +[discrete] +[[info-api-request]] +=== {api-request-title} `GET /_xpack` -[float] -=== Description +[discrete] +[[info-api-desc]] +=== {api-description-title} The information provided by this API includes: @@ -20,24 +22,22 @@ The information provided by this API includes: * Features Information - The features that are currently enabled and available under the current license. -[float] -=== Path Parameters +[discrete] +[[info-api-query-params]] +=== {api-query-parms-title} `categories`:: - (list) A comma-separated list of the information categories to include in the - response. For example, `build,license,features`. + (Optional, list) A comma-separated list of the information categories to + include in the response. For example, `build,license,features`. `human`:: - (boolean) Defines whether additional human-readable information is included in - the response. In particular, it adds descriptions and a tag line. The - default value is `true`. + (Optional, boolean) Defines whether additional human-readable information is + included in the response. In particular, it adds descriptions and a tag line. + The default value is `true`. 
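As a usage sketch for the two parameters above — assuming they are passed in the URL query string, with the category names taken from the parameter description — a request that trims the response would be: [source,js] ---- GET /_xpack?categories=build,license&human=false ---- // CONSOLE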
-//=== Query Parameters - -//=== Authorization - -[float] -=== Examples +[discrete] +[[info-api-example]] +=== {api-examples-title} The following example queries the info API: @@ -75,6 +75,10 @@ Example response: "available" : true, "enabled" : true }, + "frozen_indices" : { + "available" : true, + "enabled" : true + }, "graph" : { "available" : true, "enabled" : true @@ -103,6 +107,10 @@ Example response: "available" : true, "enabled" : false }, + "spatial" : { + "available" : true, + "enabled" : true + }, "sql" : { "available" : true, "enabled" : true diff --git a/docs/reference/rollup/api-quickref.asciidoc b/docs/reference/rollup/api-quickref.asciidoc index d1ea03b6284d7..d6be3e4e5b69c 100644 --- a/docs/reference/rollup/api-quickref.asciidoc +++ b/docs/reference/rollup/api-quickref.asciidoc @@ -17,12 +17,12 @@ Most rollup endpoints have the following base: [[rollup-api-jobs]] === /job/ -* {ref}/rollup-put-job.html[PUT /_rollup/job/+++]: Create a job -* {ref}/rollup-get-job.html[GET /_rollup/job]: List jobs -* {ref}/rollup-get-job.html[GET /_rollup/job/+++]: Get job details -* {ref}/rollup-start-job.html[POST /_rollup/job//_start]: Start a job -* {ref}/rollup-stop-job.html[POST /_rollup/job//_stop]: Stop a job -* {ref}/rollup-delete-job.html[DELETE /_rollup/job/+++]: Delete a job +* {ref}/rollup-put-job.html[PUT /_rollup/job/+++]: Create a {rollup-job} +* {ref}/rollup-get-job.html[GET /_rollup/job]: List {rollup-jobs} +* {ref}/rollup-get-job.html[GET /_rollup/job/+++]: Get {rollup-job} details +* {ref}/rollup-start-job.html[POST /_rollup/job//_start]: Start a {rollup-job} +* {ref}/rollup-stop-job.html[POST /_rollup/job//_stop]: Stop a {rollup-job} +* {ref}/rollup-delete-job.html[DELETE /_rollup/job/+++]: Delete a {rollup-job} [float] [[rollup-api-data]] diff --git a/docs/reference/rollup/apis/delete-job.asciidoc b/docs/reference/rollup/apis/delete-job.asciidoc index 18c353ac73674..85246d518348b 100644 --- a/docs/reference/rollup/apis/delete-job.asciidoc +++ b/docs/reference/rollup/apis/delete-job.asciidoc @@ -1,27 +1,48 @@ [role="xpack"] [testenv="basic"] [[rollup-delete-job]] -=== Delete job API +=== Delete {rollup-jobs} API +[subs="attributes"] ++++ -Delete job +Delete {rollup-jobs} ++++ +Deletes an existing {rollup-job}. + experimental[] -This API deletes an existing rollup job. A job must be *stopped* first before it can be deleted. Attempting to delete -a started job will result in an error. Similarly, attempting to delete a nonexistent job will throw an exception. +[[rollup-delete-job-request]] +==== {api-request-title} + +`DELETE _rollup/job/` + +[[rollup-delete-job-prereqs]] +==== {api-prereq-title} -.Deleting the job does not delete rolled up data -********************************** -When a job is deleted, that only removes the process that is actively monitoring and rolling up data. -It does not delete any previously rolled up data. This is by design; a user may wish to roll up a static dataset. Because -the dataset is static, once it has been fully rolled up there is no need to keep the indexing Rollup job around (as there -will be no new data). So the job may be deleted, leaving behind the rolled up data for analysis. +* If the {es} {security-features} are enabled, you must have `manage` or +`manage_rollup` cluster privileges to use this API. For more information, see +{stack-ov}/security-privileges.html[Security privileges]. 
-If you wish to also remove the rollup data, and the rollup index only contains the data for a single job, you can simply -delete the whole rollup index. If the rollup index stores data from several jobs, you must issue a Delete-By-Query that -targets the Rollup job's ID in the rollup index: +[[rollup-delete-job-desc]] +==== {api-description-title} +A job must be *stopped* first before it can be deleted. If you attempt to delete +a started job, an error occurs. Similarly, if you attempt to delete a +nonexistent job, an exception occurs. + +[IMPORTANT] +=============================== +When a job is deleted, that only removes the process that is actively monitoring +and rolling up data. It does not delete any previously rolled up data. This is +by design; a user may wish to roll up a static dataset. Because the dataset is +static, once it has been fully rolled up there is no need to keep the indexing +rollup job around (as there will be no new data). So the job can be deleted, +leaving behind the rolled up data for analysis. + +If you wish to also remove the rollup data, and the rollup index only contains +the data for a single job, you can simply delete the whole rollup index. If the +rollup index stores data from several jobs, you must issue a delete-by-query +that targets the rollup job's ID in the rollup index. [source,js] -------------------------------------------------- @@ -35,32 +56,23 @@ POST my_rollup_index/_delete_by_query } -------------------------------------------------- // NOTCONSOLE +=============================== -********************************** -==== Request - -`DELETE _rollup/job/` - -//===== Description - -==== Path Parameters - -`job_id` (required):: - (string) Identifier for the job - - -==== Request Body - -There is no request body for the Delete Job API. +[[rollup-delete-job-path-params]] +==== {api-path-parms-title} -==== Authorization +``:: + (Required, string) Identifier for the job. -You must have `manage` or `manage_rollup` cluster privileges to use this API. -For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +[[rollup-delete-job-response-codes]] +==== {api-response-codes-title} +`404` (Missing resources):: + This code indicates that there are no resources that match the request. It + occurs if you try to delete a job that doesn't exist. -==== Examples +[[rollup-delete-job-example]] +==== {api-example-title} If we have a rollup job named `sensor`, it can be deleted with: @@ -80,34 +92,3 @@ Which will return the response: } ---- // TESTRESPONSE - -If however we try to delete a job which doesn't exist: - -[source,js] --------------------------------------------------- -DELETE _rollup/job/does_not_exist --------------------------------------------------- -// CONSOLE -// TEST[catch:missing] - -A 404 `resource_not_found` exception will be thrown: - -[source,js] ----- -{ - "error" : { - "root_cause" : [ - { - "type" : "resource_not_found_exception", - "reason" : "the task with id [does_not_exist] doesn't exist", - "stack_trace": ... - } - ], - "type" : "resource_not_found_exception", - "reason" : "the task with id [does_not_exist] doesn't exist", - "stack_trace": ... 
- }, - "status": 404 -} ----- -// TESTRESPONSE[s/"stack_trace": .../"stack_trace": $body.$_path/] diff --git a/docs/reference/rollup/apis/put-job.asciidoc b/docs/reference/rollup/apis/put-job.asciidoc index eac71a48b4336..bba6a1714dd84 100644 --- a/docs/reference/rollup/apis/put-job.asciidoc +++ b/docs/reference/rollup/apis/put-job.asciidoc @@ -1,61 +1,74 @@ [role="xpack"] [testenv="basic"] [[rollup-put-job]] -=== Create job API +=== Create {rollup-jobs} API +[subs="attributes"] ++++ -Create job +Create {rollup-jobs} ++++ -experimental[] +Creates a {rollup-job}. -This API enables you to create a rollup job. The job will be created in a `STOPPED` state, and must be -started with the <>. +experimental[] -==== Request +[[sample-api-request]] +==== {api-request-title} `PUT _rollup/job/` -//===== Description +[[sample-api-prereqs]] +==== {api-prereq-title} -==== Path Parameters +* If the {es} {security-features} are enabled, you must have `manage` or +`manage_rollup` cluster privileges to use this API. For more information, see +{stack-ov}/security-privileges.html[Security privileges]. -`job_id` (required):: - (string) Identifier for the job +[[sample-api-desc]] +==== {api-description-title} +Jobs are created in a `STOPPED` state. You can start them with the +<>. -==== Request Body +[[sample-api-path-params]] +==== {api-path-parms-title} -`index_pattern` (required):: - (string) The index, or index pattern, that you wish to rollup. Supports wildcard-style patterns (`logstash-*`). +`job_id`:: + (Required, string) Identifier for the {rollup-job}. -`rollup_index` (required):: - (string) The index that you wish to store rollup results into. Can be shared with other rollup jobs. +[[sample-api-request-body]] +==== {api-request-body-title} -`cron` (required):: - (string) A cron string which defines when the rollup job should be executed. +`cron`:: + (Required, string) A cron string which defines when the {rollup-job} should be executed. -`page_size` (required):: - (int) The number of bucket results that should be processed on each iteration of the rollup indexer. A larger value - will tend to execute faster, but will require more memory during processing. +`groups`:: + (Required, object) Defines the grouping fields that are defined for this + {rollup-job}. See <>. -`groups` (required):: - (object) Defines the grouping fields that are defined for this rollup job. See <>. +`index_pattern`:: + (Required, string) The index or index pattern to roll up. Supports + wildcard-style patterns (`logstash-*`). `metrics`:: - (object) Defines the metrics that should be collected for each grouping tuple. See <>. + (Optional, object) Defines the metrics to collect for each grouping tuple. See + <>. -For more details about the job configuration, see <>. +`page_size`:: + (Required, integer) The number of bucket results that are processed on each + iteration of the rollup indexer. A larger value tends to execute faster, but + requires more memory during processing. -==== Authorization - -You must have `manage` or `manage_rollup` cluster privileges to use this API. -For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +`rollup_index`:: + (Required, string) The index that contains the rollup results. The index can + be shared with other {rollup-jobs}. +For more details about the job configuration, see <>. 
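The example hunk that follows opens a snippet fence but its body falls outside the diff context. As a hedged sketch only, a request combining the body parameters listed above might look like this (the `timestamp` and `temperature` field names and the interval are illustrative):

[source,js]
--------------------------------------------------
PUT _rollup/job/sensor
{
  "index_pattern": "sensor-*",
  "rollup_index": "sensor_rollup",
  "cron": "*/30 * * * * ?",
  "page_size": 1000,
  "groups": {
    "date_histogram": {
      "field": "timestamp",
      "fixed_interval": "60m"
    }
  },
  "metrics": [
    {
      "field": "temperature",
      "metrics": [ "min", "max", "sum" ]
    }
  ]
}
--------------------------------------------------
// NOTCONSOLE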
-==== Examples +[[sample-api-example]] +==== {api-example-title} -The following example creates a rollup job named "sensor", targeting the "sensor-*" index pattern: +The following example creates a {rollup-job} named "sensor", targeting the +"sensor-*" index pattern: [source,js] -------------------------------------------------- diff --git a/docs/reference/rollup/apis/start-job.asciidoc b/docs/reference/rollup/apis/start-job.asciidoc index 241d070a670a0..29d413c635978 100644 --- a/docs/reference/rollup/apis/start-job.asciidoc +++ b/docs/reference/rollup/apis/start-job.asciidoc @@ -1,41 +1,51 @@ [role="xpack"] [testenv="basic"] [[rollup-start-job]] -=== Start rollup job API +=== Start {rollup-jobs} API +[subs="attributes"] ++++ -Start job +Start {rollup-jobs} ++++ -experimental[] +Starts an existing, stopped {rollup-job}. -This API starts an existing, stopped rollup job. If the job does not exist an exception will be thrown. -Starting an already started job has no action. +experimental[] -==== Request +[[rollup-start-job-request]] +==== {api-request-title} `POST _rollup/job//_start` -//===== Description - -==== Path Parameters +[[rollup-start-job-prereqs]] +==== {api-prereq-title} -`job_id` (required):: - (string) Identifier for the job +* You must have `manage` or `manage_rollup` cluster privileges to use this API. +For more information, see +{stack-ov}/security-privileges.html[Security privileges]. +[[rollup-start-job-desc]] +==== {api-description-title} -==== Request Body +If you try to start a job that does not exist, an exception occurs. If you try +to start a job that is already started, nothing happens. -There is no request body for the Start Job API. +[[rollup-start-job-path-params]] +==== {api-path-parms-title} -==== Authorization +``:: + (Required, string) Identifier for the {rollup-job}. + +[[rollup-start-job-response-codes]] +==== {api-response-codes-title} -You must have `manage` or `manage_rollup` cluster privileges to use this API. -For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. + `404` (Missing resources):: + This code indicates that there are no resources that match the request. It + occurs if you try to start a job that doesn't exist. -==== Examples +[[rollup-start-job-examples]] +==== {api-examples-title} -If we have already created a rollup job named `sensor`, it can be started with: +If we have already created a {rollup-job} named `sensor`, it can be started with: [source,js] -------------------------------------------------- @@ -52,35 +62,4 @@ Which will return the response: "started": true } ---- -// TESTRESPONSE - -If however we try to start a job which doesn't exist: - -[source,js] --------------------------------------------------- -POST _rollup/job/does_not_exist/_start --------------------------------------------------- -// CONSOLE -// TEST[catch:missing] - -A 404 `resource_not_found` exception will be thrown: - -[source,js] ----- -{ - "error" : { - "root_cause" : [ - { - "type" : "resource_not_found_exception", - "reason" : "Task for Rollup Job [does_not_exist] not found", - "stack_trace": ... - } - ], - "type" : "resource_not_found_exception", - "reason" : "Task for Rollup Job [does_not_exist] not found", - "stack_trace": ... 
- }, - "status": 404 -} ----- -// TESTRESPONSE[s/"stack_trace": \.\.\./"stack_trace": $body.$_path/] +// TESTRESPONSE \ No newline at end of file diff --git a/docs/reference/rollup/apis/stop-job.asciidoc b/docs/reference/rollup/apis/stop-job.asciidoc index 35162246a5fbb..36c963d53fe6d 100644 --- a/docs/reference/rollup/apis/stop-job.asciidoc +++ b/docs/reference/rollup/apis/stop-job.asciidoc @@ -1,108 +1,75 @@ [role="xpack"] [testenv="basic"] [[rollup-stop-job]] -=== Stop rollup job API +=== Stop {rollup-jobs} API +[subs="attributes"] ++++ -Stop job +Stop {rollup-jobs} ++++ -experimental[] +Stops an existing, started {rollup-job}. -This API stops an existing, started rollup job. If the job does not exist an exception will be thrown. -Stopping an already stopped job has no action. +experimental[] -==== Request +[[rollup-stop-job-request]] +==== {api-request-title} `POST _rollup/job//_stop` -//===== Description - -==== Path Parameters - -`job_id` (required):: - (string) Identifier for the job - -==== Query Parameters - -`wait_for_completion` (optional):: - (boolean) if set to true, causes the API to block until the indexer state completely stops. If set to false, the - API returns immediately and the indexer will be stopped asynchronously in the background. Defaults to `false`. +[[rollup-stop-job-prereqs]] +==== {api-prereq-title} -`timeout` (optional):: - (TimeValue) if `wait_for_completion=true`, the API will block for (at maximum) - the specified duration while waiting for the job to stop. If more than `timeout` time has passed, the API - will throw a timeout exception. Note: even if a timeout exception is thrown, the stop request is still processing and - will eventually move the job to `STOPPED`. The timeout simply means the API call itself timed out while waiting - for the status change. Defaults to `30s` - -==== Request Body - -There is no request body for the Stop Job API. - -==== Authorization - -You must have `manage` or `manage_rollup` cluster privileges to use this API. +* You must have `manage` or `manage_rollup` cluster privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +{stack-ov}/security-privileges.html[Security privileges]. +[[rollup-stop-job-desc]] +===== {api-description-title} -==== Examples +If you try to stop a job that does not exist, an exception occurs. If you try +to stop a job that is already stopped, nothing happens. -If we have an already-started rollup job named `sensor`, it can be stopped with: +[[rollup-stop-job-path-parms]] +==== {api-path-parms-title} -[source,js] --------------------------------------------------- -POST _rollup/job/sensor/_stop --------------------------------------------------- -// CONSOLE -// TEST[setup:sensor_started_rollup_job] -// TEST[s/_stop/_stop?wait_for_completion=true&timeout=10s/] +``:: + (Required, string) Identifier for the {rollup-job}. -Which will return the response: +[[rollup-stop-job-query-parms]] +==== {api-query-parms-title} -[source,js] ----- -{ - "stopped": true -} ----- -// TESTRESPONSE +`timeout`:: + (Optional, TimeValue) If `wait_for_completion` is `true`, the API blocks for + (at maximum) the specified duration while waiting for the job to stop. If more + than `timeout` time has passed, the API throws a timeout exception. Defaults + to `30s`. ++ +-- +NOTE: Even if a timeout exception is thrown, the stop request is still +processing and eventually moves the job to `STOPPED`. 
The timeout simply means +the API call itself timed out while waiting for the status change. -If however we try to stop a job which doesn't exist: +-- + +`wait_for_completion`:: + (Optional, boolean) If set to `true`, causes the API to block until the + indexer state completely stops. If set to `false`, the API returns immediately + and the indexer is stopped asynchronously in the background. Defaults to + `false`. -[source,js] --------------------------------------------------- -POST _rollup/job/does_not_exist/_stop --------------------------------------------------- -// CONSOLE -// TEST[catch:missing] +[[rollup-stop-job-response-codes]] +==== {api-response-codes-title} -A 404 `resource_not_found` exception will be thrown: +`404` (Missing resources):: + This code indicates that there are no resources that match the request. It + occurs if you try to stop a job that doesn't exist. -[source,js] ----- -{ - "error" : { - "root_cause" : [ - { - "type" : "resource_not_found_exception", - "reason" : "Task for Rollup Job [does_not_exist] not found", - "stack_trace": ... - } - ], - "type" : "resource_not_found_exception", - "reason" : "Task for Rollup Job [does_not_exist] not found", - "stack_trace": ... - }, - "status": 404 -} ----- -// TESTRESPONSE[s/"stack_trace": .../"stack_trace": $body.$_path/] - -===== Waiting for the job to stop - -Since only a stopped job can be deleted, it can be useful to block the StopJob API until the indexer has fully -stopped. This is accomplished with the `wait_for_completion` query parameter, and optionally a `timeout`: +[[rollup-stop-job-examples]] +==== {api-examples-title} + +Since only a stopped job can be deleted, it can be useful to block the API until +the indexer has fully stopped. This is accomplished with the +`wait_for_completion` query parameter, and optionally a `timeout`: [source,js] @@ -112,7 +79,6 @@ POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s // CONSOLE // TEST[setup:sensor_started_rollup_job] -The parameter will block the API call from returning until either the job has moved to `STOPPED`, or the specified -time has elapsed. If the specified time elapses without the job moving to `STOPPED`, a timeout exception will be thrown. - -If `wait_for_completion=true` is specified without a `timeout`, a default timeout of 30 seconds is used. \ No newline at end of file +The parameter blocks the API call from returning until either the job has moved +to `STOPPED` or the specified time has elapsed. If the specified time elapses +without the job moving to `STOPPED`, a timeout exception is thrown. 
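For contrast with the blocking form above, the basic fire-and-forget call (present in the text this hunk removes) omits both query parameters and returns immediately:

[source,js]
--------------------------------------------------
POST _rollup/job/sensor/_stop
--------------------------------------------------
// NOTCONSOLE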
diff --git a/docs/reference/rollup/rollup-api.asciidoc b/docs/reference/rollup/rollup-api.asciidoc index 5981336d0a054..c156265c2ffc3 100644 --- a/docs/reference/rollup/rollup-api.asciidoc +++ b/docs/reference/rollup/rollup-api.asciidoc @@ -7,9 +7,9 @@ [[rollup-jobs-endpoint]] === Jobs -* <>, <>, -* <>, <>, -* <> +* <> or <> +* <> or <> +* <> * <> [float] @@ -26,13 +26,12 @@ * <> - +include::apis/put-job.asciidoc[] include::apis/delete-job.asciidoc[] include::apis/get-job.asciidoc[] -include::apis/put-job.asciidoc[] -include::apis/start-job.asciidoc[] -include::apis/stop-job.asciidoc[] include::apis/rollup-caps.asciidoc[] include::apis/rollup-index-caps.asciidoc[] include::apis/rollup-search.asciidoc[] include::apis/rollup-job-config.asciidoc[] +include::apis/start-job.asciidoc[] +include::apis/stop-job.asciidoc[] \ No newline at end of file diff --git a/docs/reference/scripting/fields.asciidoc b/docs/reference/scripting/fields.asciidoc index cf8905189f799..ef8aee98ced7c 100644 --- a/docs/reference/scripting/fields.asciidoc +++ b/docs/reference/scripting/fields.asciidoc @@ -19,7 +19,7 @@ API will have access to the `ctx` variable which exposes: [float] == Search and aggregation scripts -With the exception of <> which are +With the exception of <> which are executed once per search hit, scripts used in search and aggregations will be executed once for every document which might match a query or an aggregation. Depending on how many documents you have, this could mean millions or billions @@ -34,7 +34,7 @@ Field values can be accessed from a script using === Accessing the score of a document within a script Scripts used in the <>, -in <>, or in +in <>, or in <> have access to the `_score` variable which represents the current relevance score of a document. @@ -162,7 +162,7 @@ many documents. It makes sense to use `_source` or stored fields when generating a -<> for the top ten hits from a search +<> for the top ten hits from a search result but, for other search and aggregation use cases, always prefer using doc values. ========================================================= diff --git a/docs/reference/scripting/security.asciidoc b/docs/reference/scripting/security.asciidoc index 421cec2ccf760..c44538096c372 100644 --- a/docs/reference/scripting/security.asciidoc +++ b/docs/reference/scripting/security.asciidoc @@ -78,10 +78,10 @@ security of the Elasticsearch deployment. [float] === Allowed script types setting -By default all script types are allowed to be executed. This can be modified using the -setting `script.allowed_types`. Only the types specified as part of the setting will be -allowed to be executed. To specify no types are allowed, set `script.allowed_types` to -be `none`. +Elasticsearch supports two script types: `inline` and `stored` (<>). +By default both types of scripts are allowed to be executed. To limit the types of scripts +that can run, set `script.allowed_types` to `inline`, `stored`, or `none`. For example, +to run `inline` scripts but not `stored` scripts, specify: [source,yaml] ---- diff --git a/docs/reference/scripting/using.asciidoc b/docs/reference/scripting/using.asciidoc index 6da631ac9f592..a9646f2f3c8c6 100644 --- a/docs/reference/scripting/using.asciidoc +++ b/docs/reference/scripting/using.asciidoc @@ -18,7 +18,7 @@ the same pattern: <3> Any named parameters that should be passed into the script. 
For example, the following script is used in a search request to return a -<>: +<>: [source,js] ------------------------------------- diff --git a/docs/reference/search.asciidoc b/docs/reference/search.asciidoc index e99fb6f388d02..84ff2e45282c8 100644 --- a/docs/reference/search.asciidoc +++ b/docs/reference/search.asciidoc @@ -1,15 +1,12 @@ [[search]] -= Search APIs - -[partintro] --- +== Search APIs Most search APIs are <>, with the exception of the <> endpoints. [float] [[search-routing]] -== Routing +=== Routing When executing a search, Elasticsearch will pick the "best" copy of the data based on the <> formula. @@ -59,7 +56,7 @@ the routing values match to. [float] [[search-adaptive-replica]] -== Adaptive Replica Selection +=== Adaptive Replica Selection By default, Elasticsearch will use what is called adaptive replica selection. This allows the coordinating node to send the request to the copy deemed "best" @@ -90,7 +87,7 @@ index/indices shards in a round robin fashion between all copies of the data [float] [[stats-groups]] -== Stats Groups +=== Stats Groups A search can be associated with stats groups, which maintains a statistics aggregation per group. It can later be retrieved using the @@ -113,7 +110,7 @@ POST /_search [float] [[global-search-timeout]] -== Global Search Timeout +=== Global Search Timeout Individual searches can have a timeout as part of the <>. Since search requests can originate from many @@ -130,7 +127,7 @@ Setting this value to `-1` resets the global search timeout to no timeout. [float] [[global-search-cancellation]] -== Search Cancellation +=== Search Cancellation Searches can be cancelled using standard <> mechanism. By default, a running search only checks if it is cancelled or @@ -143,7 +140,7 @@ setting only affects the searches that start after the change is made. [float] [[search-concurrency-and-parallelism]] -== Search concurrency and parallelism +=== Search concurrency and parallelism By default Elasticsearch doesn't reject any search requests based on the number of shards the request hits. While Elasticsearch will optimize the search @@ -160,8 +157,6 @@ overloading a cluster (e.g., a default request will hit all indices in a cluster which could cause shard request rejections if the number of shards per node is high). This default value is `5`. --- - include::search/search.asciidoc[] include::search/uri-request.asciidoc[] diff --git a/docs/reference/search/count.asciidoc b/docs/reference/search/count.asciidoc index 19ba77cd12979..93262a12f8acd 100644 --- a/docs/reference/search/count.asciidoc +++ b/docs/reference/search/count.asciidoc @@ -1,5 +1,5 @@ [[search-count]] -== Count API +=== Count API The count API allows to easily execute a query and get the number of matches for that query. It can be executed across one or more indices. @@ -49,12 +49,12 @@ The query is optional, and when not provided, it will use `match_all` to count all the docs. [float] -=== Multi index +==== Multi index The count API can be applied to <>. [float] -=== Request Parameters +==== Request Parameters When executing count using the query parameter `q`, the query passed is a query string using Lucene query parser. There are additional @@ -85,7 +85,7 @@ Defaults to no terminate_after. |======================================================================= [float] -=== Request Body +==== Request Body The count can use the <> within its body in order to express the query that should be executed. 
The body @@ -95,14 +95,14 @@ Both HTTP GET and HTTP POST can be used to execute count with body. Since not all clients support GET with body, POST is allowed as well. [float] -=== Distributed +==== Distributed The count operation is broadcast across all shards. For each shard id group, a replica is chosen and executed against it. This means that replicas increase the scalability of count. [float] -=== Routing +==== Routing The routing value (a comma separated list of the routing values) can be specified to control which shards the count request will be executed on. diff --git a/docs/reference/search/explain.asciidoc b/docs/reference/search/explain.asciidoc index 1d91839b064c6..061bae555bcfb 100644 --- a/docs/reference/search/explain.asciidoc +++ b/docs/reference/search/explain.asciidoc @@ -1,5 +1,5 @@ [[search-explain]] -== Explain API +=== Explain API The explain api computes a score explanation for a query and a specific document. This can give useful feedback whether a document matches or @@ -8,7 +8,7 @@ didn't match a specific query. Note that a single index must be provided to the `index` parameter. [float] -=== Usage +==== Usage Full query example: @@ -116,7 +116,7 @@ GET /twitter/_explain/0?q=message:search This will yield the same result as the previous request. [float] -=== All parameters: +==== All parameters: [horizontal] `_source`:: diff --git a/docs/reference/search/field-caps.asciidoc b/docs/reference/search/field-caps.asciidoc index d150e8e5fa6db..c7d532a0b869d 100644 --- a/docs/reference/search/field-caps.asciidoc +++ b/docs/reference/search/field-caps.asciidoc @@ -1,5 +1,5 @@ [[search-field-caps]] -== Field Capabilities API +=== Field Capabilities API The field capabilities API allows to retrieve the capabilities of fields among multiple indices. @@ -27,7 +27,7 @@ Supported request options: will cause all fields that match the expression to be returned. [float] -=== Field Capabilities +==== Field Capabilities The field capabilities API returns the following information per field: @@ -57,7 +57,7 @@ or null if all indices have the same definition for the field. [float] -=== Response format +==== Response format Request: @@ -70,8 +70,8 @@ GET _field_caps?fields=rating,title [source,js] -------------------------------------------------- { + "indices": ["index1", "index2", "index3", "index4", "index5"], "fields": { - "indices": ["index1", "index2", "index3", "index4", "index5"], "rating": { <1> "long": { "searchable": true, @@ -105,7 +105,7 @@ and as a `keyword` in `index3` and `index4`. <4> The field `title` is defined as `text` in all indices. [float] -=== Unmapped fields +==== Unmapped fields By default unmapped fields are ignored. You can include them in the response by adding a parameter called `include_unmapped` in the request: @@ -122,8 +122,8 @@ some indices but not all: [source,js] -------------------------------------------------- { + "indices": ["index1", "index2", "index3"], "fields": { - "indices": ["index1", "index2", "index3"], "rating": { "long": { "searchable": true, diff --git a/docs/reference/search/multi-search.asciidoc b/docs/reference/search/multi-search.asciidoc index 87a87c922b37c..42dbd0f5c2a1a 100644 --- a/docs/reference/search/multi-search.asciidoc +++ b/docs/reference/search/multi-search.asciidoc @@ -1,5 +1,5 @@ [[search-multi-search]] -== Multi Search API +=== Multi Search API The multi search API allows to execute several search requests within the same API. The endpoint for it is `_msearch`. 
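As background for the `_msearch` heading demotions that follow: the endpoint takes newline-delimited header/body pairs. A minimal sketch (the `twitter` index names are illustrative):

[source,js]
--------------------------------------------------
GET twitter/_msearch
{}
{"query" : {"match_all" : {}}, "from" : 0, "size" : 10}
{"index" : "twitter2"}
{"query" : {"match_all" : {}}}
--------------------------------------------------
// NOTCONSOLE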
@@ -98,13 +98,13 @@ increase this value to a higher number. [float] [[msearch-security]] -=== Security +==== Security See <> [float] [[template-msearch]] -=== Template support +==== Template support Much like described in <> for the _search resource, _msearch also provides support for templates. Submit them like follows: @@ -177,5 +177,5 @@ GET _msearch/template [float] [[multi-search-partial-responses]] -=== Partial responses +==== Partial responses To ensure fast responses, the multi search API will respond with partial results if one or more shards fail. See <> for more information. \ No newline at end of file diff --git a/docs/reference/search/profile.asciidoc b/docs/reference/search/profile.asciidoc index e5ba1b851cdc8..44628133f6bff 100644 --- a/docs/reference/search/profile.asciidoc +++ b/docs/reference/search/profile.asciidoc @@ -1,5 +1,5 @@ [[search-profile]] -== Profile API +=== Profile API WARNING: The Profile API is a debugging tool and adds significant overhead to search execution. @@ -14,7 +14,7 @@ The output from the Profile API is *very* verbose, especially for complicated re many shards. Pretty-printing the response is recommended to help understand the output [float] -=== Usage +==== Usage Any `_search` request can be profiled by adding a top-level `profile` parameter: @@ -227,8 +227,8 @@ NOTE: As with other statistics apis, the Profile API supports human readable out `?human=true` to the query string. In this case, the output contains the additional `time` field containing rounded, human readable timing information (e.g. `"time": "391,9ms"`, `"time": "123.3micros"`). -[[search-profile-queries]] -=== Profiling Queries +[[profiling-queries]] +==== Profiling Queries [NOTE] ======================================= @@ -244,7 +244,7 @@ the `advance` phase of that query is the cause, for example. ======================================= [[query-section]] -==== `query` Section +===== `query` Section The `query` section contains detailed timing of the query tree executed by Lucene on a particular shard. The overall structure of this query tree will resemble your original Elasticsearch query, but may be slightly @@ -296,7 +296,7 @@ that in a moment. Finally, the `children` array lists any sub-queries that may values ("search test"), our BooleanQuery holds two children TermQueries. They have identical information (type, time, breakdown, etc). Children are allowed to have their own children. -===== Timing Breakdown +====== Timing Breakdown The `breakdown` component lists detailed timing statistics about low-level Lucene execution: @@ -335,7 +335,7 @@ the breakdown is inclusive of all children times. The meaning of the stats are as follows: [float] -==== All parameters: +===== All parameters: [horizontal] `create_weight`:: @@ -401,7 +401,7 @@ The meaning of the stats are as follows: how selective queries are, by comparing counts between different query components. [[collectors-section]] -==== `collectors` Section +===== `collectors` Section The Collectors portion of the response shows high-level execution details. Lucene works by defining a "Collector" which is responsible for coordinating the traversal, scoring, and collection of matching documents. Collectors @@ -488,7 +488,7 @@ For reference, the various collector reasons are: [[rewrite-section]] -==== `rewrite` Section +===== `rewrite` Section All queries in Lucene undergo a "rewriting" process. A query (and its sub-queries) may be rewritten one or more times, and the process continues until the query stops changing. 
This process allows Lucene to perform @@ -500,7 +500,7 @@ The rewriting process is complex and difficult to display, since queries can cha showing the intermediate results, the total rewrite time is simply displayed as a value (in nanoseconds). This value is cumulative and contains the total time for all queries being rewritten. -==== A more complex example +===== A more complex example To demonstrate a slightly more complex query and the associated results, we can profile the following query: @@ -674,7 +674,7 @@ The Collector tree is fairly straightforward, showing how a single CancellableCo which also wraps a FilteredCollector to execute the post_filter (and in turn wraps the normal scoring SimpleCollector), a BucketCollector to run all scoped aggregations. -==== Understanding MultiTermQuery output +===== Understanding MultiTermQuery output A special note needs to be made about the `MultiTermQuery` class of queries. This includes wildcards, regex, and fuzzy queries. These queries emit very verbose responses, and are not overly structured. @@ -693,11 +693,11 @@ ignore its children if you find the details too tricky to interpret. Hopefully this will be fixed in future iterations, but it is a tricky problem to solve and still in-progress :) -[[search-profile-aggregations]] -=== Profiling Aggregations +[[profiling-aggregations]] +==== Profiling Aggregations [[agg-section]] -==== `aggregations` Section +===== `aggregations` Section The `aggregations` section contains detailed timing of the aggregation tree executed by a particular shard. @@ -817,7 +817,7 @@ aggregation then has a child `LongTermsAggregator` which comes from the second t The `time_in_nanos` field shows the time executed by each aggregation, and is inclusive of all children. While the overall time is useful, the `breakdown` field will give detailed stats about how the time was spent. -===== Timing Breakdown +====== Timing Breakdown The `breakdown` component lists detailed timing statistics about low-level Lucene execution: @@ -845,7 +845,7 @@ the breakdown is inclusive of all children times. The meaning of the stats are as follows: [float] -==== All parameters: +===== All parameters: [horizontal] `initialise`:: @@ -868,10 +868,10 @@ The meaning of the stats are as follows: Records the number of invocations of the particular method. For example, `"collect_count": 2,` means the `collect()` method was called on two different documents. -[[search-profile-considerations]] -=== Profiling Considerations +[[profiling-considerations]] +==== Profiling Considerations -==== Performance Notes +===== Performance Notes Like any profiler, the Profile API introduces a non-negligible overhead to search execution. The act of instrumenting low-level method calls such as `collect`, `advance`, and `next_doc` can be fairly expensive, since these methods are called @@ -883,7 +883,7 @@ could cause some queries to report larger relative times than their non-profiled not have a drastic effect compared to other components in the profiled query. 
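For reference while weighing the overhead described in the performance notes above, the smallest profiled request is just a normal search body with the top-level flag set, per the usage section earlier in this file. A sketch (index and field names are illustrative):

[source,js]
--------------------------------------------------
GET /twitter/_search
{
  "profile": true,
  "query" : {
    "match" : { "message" : "some number" }
  }
}
--------------------------------------------------
// NOTCONSOLE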
[[profile-limitations]] -==== Limitations +===== Limitations - Profiling currently does not measure the search fetch phase nor the network overhead - Profiling also does not account for time spent in the queue, merging shard responses on the coordinating node, or diff --git a/docs/reference/search/rank-eval.asciidoc b/docs/reference/search/rank-eval.asciidoc index 0ec2e070b1c74..aa1b8589cce97 100644 --- a/docs/reference/search/rank-eval.asciidoc +++ b/docs/reference/search/rank-eval.asciidoc @@ -1,5 +1,5 @@ [[search-rank-eval]] -== Ranking Evaluation API +=== Ranking Evaluation API experimental["The ranking evaluation API is experimental and may be changed or removed completely in a future release, as well as change in non-backwards compatible ways on minor versions updates. Elastic will take a best effort approach to fix any issues, but experimental features are not subject to the support SLA of official GA features."] @@ -10,7 +10,7 @@ returns typical information retrieval metrics like _mean reciprocal rank_, _precision_ or _discounted cumulative gain_. [float] -=== Overview +==== Overview Search quality evaluation starts with looking at the users of your search application, and the things that they are searching for. Users have a specific _information need_, e.g. they are looking for gift in a web shop or want to book a flight for their next holiday. @@ -31,7 +31,7 @@ In order to get started with search quality evaluation, three basic things are n The ranking evaluation API provides a convenient way to use this information in a ranking evaluation request to calculate different search evaluation metrics. This gives a first estimation of your overall search quality and give you a measurement to optimize against when fine-tuning various aspect of the query generation in your application. [float] -=== Ranking evaluation request structure +==== Ranking evaluation request structure In its most basic form, a request to the `_rank_eval` endpoint has two sections: @@ -88,7 +88,7 @@ the rating of the documents relevance with regards to this search request A document `rating` can be any integer value that expresses the relevance of the document on a user defined scale. For some of the metrics, just giving a binary rating (e.g. `0` for irrelevant and `1` for relevant) will be sufficient, other metrics can use a more fine grained scale. [float] -=== Template based ranking evaluation +==== Template based ranking evaluation As an alternative to having to provide a single query per test request, it is possible to specify query templates in the evaluation request and later refer to them. Queries with similar structure that only differ in their parameters don't have to be repeated all the time in the `requests` section this way. In typical search systems where user inputs usually get filled into a small set of query templates, this helps making the evaluation request more succinct. @@ -130,14 +130,14 @@ GET /my_index/_rank_eval <4> the parameters to use to fill the template [float] -=== Available evaluation metrics +==== Available evaluation metrics The `metric` section determines which of the available evaluation metrics is going to be used. Currently, the following metrics are supported: [float] [[k-precision]] -==== Precision at K (P@k) +===== Precision at K (P@k) This metric measures the number of relevant results in the top k search results. Its a form of the well known https://en.wikipedia.org/wiki/Information_retrieval#Precision[Precision] metric that only looks at the top k documents. 
It is the fraction of relevant documents in those first k search. A precision at 10 (P@10) value of 0.6 then means six out of the 10 top hits are relevant with respect to the users information need. @@ -183,7 +183,7 @@ If set to 'true', unlabeled documents are ignored and neither count as relevant |======================================================================= [float] -==== Mean reciprocal rank +===== Mean reciprocal rank For every query in the test suite, this metric calculates the reciprocal of the rank of the first relevant document. For example finding the first relevant result @@ -223,7 +223,7 @@ in the query. Defaults to 10. |======================================================================= [float] -==== Discounted cumulative gain (DCG) +===== Discounted cumulative gain (DCG) In contrast to the two metrics above, https://en.wikipedia.org/wiki/Discounted_cumulative_gain[discounted cumulative gain] takes both, the rank and the rating of the search results, into account. @@ -261,7 +261,7 @@ in the query. Defaults to 10. |======================================================================= [float] -==== Expected Reciprocal Rank (ERR) +===== Expected Reciprocal Rank (ERR) Expected Reciprocal Rank (ERR) is an extension of the classical reciprocal rank for the graded relevance case (Olivier Chapelle, Donald Metzler, Ya Zhang, and Pierre Grinspan. 2009. http://olivier.chapelle.cc/pub/err.pdf[Expected reciprocal rank for graded relevance].) @@ -311,7 +311,7 @@ in the query. Defaults to 10. |======================================================================= [float] -=== Response format +==== Response format The response of the `_rank_eval` endpoint contains the overall calculated result for the defined quality metric, a `details` section with a breakdown of results for each query in the test suite and an optional `failures` section diff --git a/docs/reference/search/request-body.asciidoc b/docs/reference/search/request-body.asciidoc index aa67ffec59181..18d202e65f06f 100644 --- a/docs/reference/search/request-body.asciidoc +++ b/docs/reference/search/request-body.asciidoc @@ -1,5 +1,5 @@ [[search-request-body]] -== Request Body Search +=== Request Body Search The search request can be executed with a search DSL, which includes the <>, within its body. Here is an @@ -56,7 +56,7 @@ And here is a sample response: // TESTRESPONSE[s/"took": 1/"took": $body.took/] [float] -=== Parameters +==== Parameters [horizontal] `timeout`:: @@ -82,7 +82,7 @@ And here is a sample response: The type of the search operation to perform. Can be `dfs_query_then_fetch` or `query_then_fetch`. Defaults to `query_then_fetch`. - See <> for more. + See <> for more. `request_cache`:: @@ -129,7 +129,7 @@ Both HTTP GET and HTTP POST can be used to execute search with body. Since not all clients support GET with body, POST is allowed as well. 
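Since the hunk above notes that POST is accepted for clients that cannot send a GET with a body, the equivalent POST form is worth spelling out. A sketch (index and field names are illustrative):

[source,js]
--------------------------------------------------
POST /twitter/_search
{
  "query" : {
    "match" : { "user" : "kimchy" }
  }
}
--------------------------------------------------
// NOTCONSOLE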
[float] -=== Fast check for any matching docs +==== Fast check for any matching docs NOTE: `terminate_after` is always applied **after** the `post_filter` and stops the query as well as the aggregation executions when enough hits have been diff --git a/docs/reference/search/request/collapse.asciidoc b/docs/reference/search/request/collapse.asciidoc index 1ab79e36c7e9d..8cc48fbcc634f 100644 --- a/docs/reference/search/request/collapse.asciidoc +++ b/docs/reference/search/request/collapse.asciidoc @@ -1,5 +1,5 @@ -[[search-request-collapse]] -=== Field Collapsing +[[request-body-search-collapse]] +==== Field Collapsing Allows to collapse search results based on field values. The collapsing is done by selecting only the top sorted document per collapse key. @@ -35,7 +35,7 @@ The field used for collapsing must be a single valued <> or NOTE: The collapsing is applied to the top hits only and does not affect aggregations. -==== Expand collapse results +===== Expand collapse results It is also possible to expand each collapsed top hits with the `inner_hits` option. @@ -68,7 +68,7 @@ GET /twitter/_search <4> how to sort the document inside each group <5> the number of concurrent requests allowed to retrieve the inner_hits` per group -See <> for the complete list of supported options and the format of the response. +See <> for the complete list of supported options and the format of the response. It is also possible to request multiple `inner_hits` for each collapsed hit. This can be useful when you want to get multiple representations of the collapsed hits. @@ -114,10 +114,10 @@ The `max_concurrent_group_searches` request parameter can be used to control the maximum number of concurrent searches allowed in this phase. The default is based on the number of data nodes and the default search thread pool size. -WARNING: `collapse` cannot be used in conjunction with <>, -<> or <>. +WARNING: `collapse` cannot be used in conjunction with <>, +<> or <>. -==== Second level of collapsing +===== Second level of collapsing Second level of collapsing is also supported and is applied to `inner_hits`. For example, the following request finds the top scored tweets for diff --git a/docs/reference/search/request/docvalue-fields.asciidoc b/docs/reference/search/request/docvalue-fields.asciidoc index 784cc94015366..6190eb6aef13f 100644 --- a/docs/reference/search/request/docvalue-fields.asciidoc +++ b/docs/reference/search/request/docvalue-fields.asciidoc @@ -1,5 +1,5 @@ -[[search-request-docvalue-fields]] -=== Doc value Fields +[[request-body-search-docvalue-fields]] +==== Doc value Fields Allows to return the <> representation of a field for each hit, for example: @@ -55,7 +55,7 @@ Note that if the fields parameter specifies fields without docvalues it will try causing the terms for that field to be loaded to memory (cached), which will result in more memory consumption. [float] -==== Custom formats +====== Custom formats While most fields do not support custom formats, some of them do: @@ -70,4 +70,4 @@ with the configured `date` format, etc. NOTE: On its own, `docvalue_fields` cannot be used to load fields in nested objects -- if a field contains a nested object in its path, then no data will be returned for that docvalue field. To access nested fields, `docvalue_fields` -must be used within an <> block. \ No newline at end of file +must be used within an <> block. 
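The renamed docvalue-fields section above covers both the bare-field and per-field custom format spellings. A hedged sketch of the two in one request (field names are illustrative):

[source,js]
--------------------------------------------------
GET /_search
{
  "query" : {
    "match_all" : {}
  },
  "docvalue_fields" : [
    "my_ip_field",
    {
      "field": "my_date_field",
      "format": "epoch_millis"
    }
  ]
}
--------------------------------------------------
// NOTCONSOLE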
\ No newline at end of file diff --git a/docs/reference/search/request/explain.asciidoc b/docs/reference/search/request/explain.asciidoc index 9bcaecb484060..704b958edd016 100644 --- a/docs/reference/search/request/explain.asciidoc +++ b/docs/reference/search/request/explain.asciidoc @@ -1,5 +1,5 @@ -[[search-request-explain]] -=== Explain +[[request-body-search-explain]] +==== Explain Enables explanation for each hit on how its score was computed. diff --git a/docs/reference/search/request/from-size.asciidoc b/docs/reference/search/request/from-size.asciidoc index 1c44a7ca8d286..04befada139a0 100644 --- a/docs/reference/search/request/from-size.asciidoc +++ b/docs/reference/search/request/from-size.asciidoc @@ -1,5 +1,5 @@ -[[search-request-from-size]] -=== From / Size +[[request-body-search-from-size]] +==== From / Size Pagination of results can be done by using the `from` and `size` parameters. The `from` parameter defines the offset from the first @@ -24,5 +24,5 @@ GET /_search Note that `from` + `size` can not be more than the `index.max_result_window` -index setting which defaults to 10,000. See the <> or <> +index setting which defaults to 10,000. See the <> or <> API for more efficient ways to do deep scrolling. diff --git a/docs/reference/search/request/highlighting.asciidoc b/docs/reference/search/request/highlighting.asciidoc index ad836c7c535e7..907ef3a511b8b 100644 --- a/docs/reference/search/request/highlighting.asciidoc +++ b/docs/reference/search/request/highlighting.asciidoc @@ -1,5 +1,5 @@ -[[search-request-highlighting]] -=== Highlighting +[[request-body-search-highlighting]] +==== Highlighting Highlighters enable you to get highlighted snippets from one or more fields in your search results so you can show users where the query matches are. @@ -42,7 +42,7 @@ highlighter). You can specify the highlighter `type` you want to use for each field. [[unified-highlighter]] -==== Unified highlighter +===== Unified highlighter The `unified` highlighter uses the Lucene Unified Highlighter. This highlighter breaks the text into sentences and uses the BM25 algorithm to score individual sentences as if they were documents in the corpus. It also supports @@ -50,7 +50,7 @@ accurate phrase and multi-term (fuzzy, prefix, regex) highlighting. This is the default highlighter. [[plain-highlighter]] -==== Plain highlighter +===== Plain highlighter The `plain` highlighter uses the standard Lucene highlighter. It attempts to reflect the query matching logic in terms of understanding word importance and any word positioning criteria in phrase queries. @@ -65,7 +65,7 @@ If you want to highlight a lot of fields in a lot of documents with complex queries, we recommend using the `unified` highlighter on `postings` or `term_vector` fields. [[fast-vector-highlighter]] -==== Fast vector highlighter +===== Fast vector highlighter The `fvh` highlighter uses the Lucene Fast Vector highlighter. This highlighter can be used on fields with `term_vector` set to `with_positions_offsets` in the mapping. The fast vector highlighter: @@ -84,7 +84,7 @@ The `fvh` highlighter does not support span queries. If you need support for span queries, try an alternative highlighter, such as the `unified` highlighter. [[offsets-strategy]] -==== Offsets Strategy +===== Offsets Strategy To create meaningful search snippets from the terms being queried, the highlighter needs to know the start and end character offsets of each word in the original text. These offsets can be obtained from: @@ -117,7 +117,7 @@ limited to 1000000. 
This default limit can be changed for a particular index with the index setting `index.highlight.max_analyzed_offset`. [[highlighting-settings]] -==== Highlighting Settings +===== Highlighting Settings Highlighting settings can be set on a global level and overridden at the field level. @@ -255,7 +255,7 @@ type:: The highlighter to use: `unified`, `plain`, or `fvh`. Defaults to `unified`. [[highlighting-examples]] -==== Highlighting Examples +===== Highlighting Examples * <> * <> @@ -271,7 +271,7 @@ type:: The highlighter to use: `unified`, `plain`, or `fvh`. Defaults to [[override-global-settings]] [float] -=== Override global settings +==== Override global settings You can specify highlighter settings globally and selectively override them for individual fields. @@ -300,7 +300,7 @@ GET /_search [float] [[specify-highlight-query]] -=== Specify a highlight query +==== Specify a highlight query You can specify a `highlight_query` to take additional information into account when highlighting. For example, the following query includes both the search @@ -370,7 +370,7 @@ GET /_search [float] [[set-highlighter-type]] -=== Set highlighter type +==== Set highlighter type The `type` field allows to force a specific highlighter type. The allowed values are: `unified`, `plain` and `fvh`. @@ -395,7 +395,7 @@ GET /_search [[configure-tags]] [float] -=== Configure highlighting tags +==== Configure highlighting tags By default, the highlighting will wrap highlighted text in `` and ``. This can be controlled by setting `pre_tags` and `post_tags`, @@ -464,7 +464,7 @@ GET /_search [float] [[highlight-source]] -=== Highlight on source +==== Highlight on source Forces the highlighting to highlight fields based on the source even if fields are stored separately. Defaults to `false`. @@ -489,7 +489,7 @@ GET /_search [[highlight-all]] [float] -=== Highlight in all fields +==== Highlight in all fields By default, only fields that contains a query match are highlighted. Set `require_field_match` to `false` to highlight all fields. @@ -514,7 +514,7 @@ GET /_search [[matched-fields]] [float] -=== Combine matches on multiple fields +==== Combine matches on multiple fields WARNING: This is only supported by the `fvh` highlighter @@ -651,7 +651,7 @@ to [[explicit-field-order]] [float] -=== Explicitly order highlighted fields +==== Explicitly order highlighted fields Elasticsearch highlights the fields in the order that they are sent, but per the JSON spec, objects are unordered. If you need to be explicit about the order in which fields are highlighted specify the `fields` as an array: @@ -679,7 +679,7 @@ fields are highlighted but a plugin might. 
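The explicit-field-order hunk above cuts away before the array form it introduces. A sketch of `fields` as an array to force highlight order (field names are illustrative):

[source,js]
--------------------------------------------------
GET /_search
{
  "query" : {
    "match" : { "user" : "kimchy" }
  },
  "highlight" : {
    "fields" : [
      { "title" : {} },
      { "text" : {} }
    ]
  }
}
--------------------------------------------------
// NOTCONSOLE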
[float] [[control-highlighted-frags]] -=== Control highlighted fragments +==== Control highlighted fragments Each field highlighted can control the size of the highlighted fragment in characters (defaults to `100`), and the maximum number of fragments @@ -780,7 +780,7 @@ GET /_search [float] [[highlight-postings-list]] -=== Highlight using the postings list +==== Highlight using the postings list Here is an example of setting the `comment` field in the index mapping to allow for highlighting using the postings: @@ -822,7 +822,7 @@ PUT /example [float] [[specify-fragmenter]] -=== Specify a fragmenter for the plain highlighter +==== Specify a fragmenter for the plain highlighter When using the `plain` highlighter, you can choose between the `simple` and `span` fragmenters: diff --git a/docs/reference/search/request/index-boost.asciidoc b/docs/reference/search/request/index-boost.asciidoc index 683fe910f5e6e..9cf09fcd7b40d 100644 --- a/docs/reference/search/request/index-boost.asciidoc +++ b/docs/reference/search/request/index-boost.asciidoc @@ -1,5 +1,5 @@ -[[search-request-index-boost]] -=== Index Boost +[[request-body-search-index-boost]] +==== Index Boost Allows to configure different boost level per index when searching across more than one indices. This is very handy when hits coming from diff --git a/docs/reference/search/request/inner-hits.asciidoc b/docs/reference/search/request/inner-hits.asciidoc index 7774e34c2c00d..8aef784e69d50 100644 --- a/docs/reference/search/request/inner-hits.asciidoc +++ b/docs/reference/search/request/inner-hits.asciidoc @@ -1,5 +1,5 @@ -[[search-request-inner-hits]] -=== Inner hits +[[request-body-search-inner-hits]] +==== Inner hits The <> and <> features allow the return of documents that have matches in a different scope. In the parent/child case, parent documents are returned based on matches in child @@ -55,7 +55,7 @@ If `inner_hits` is defined on a query that supports it then each search hit will -------------------------------------------------- // NOTCONSOLE -==== Options +===== Options Inner hits support the following options: @@ -70,16 +70,16 @@ Inner hits support the following options: Inner hits also supports the following per document features: -* <> -* <> -* <> -* <> -* <> -* <> -* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> [[nested-inner-hits]] -==== Nested inner hits +===== Nested inner hits The nested `inner_hits` can be used to include nested inner objects as inner hits to a search hit. @@ -198,7 +198,7 @@ So in the above example only the comment part is returned per nested hit and not document that contained the comment. [[nested-inner-hits-source]] -==== Nested inner hits and +_source+ +===== Nested inner hits and +_source+ Nested document don't have a `_source` field, because the entire source of document is stored with the root document under its `_source` field. To include the source of just the nested document, the source of the root document is parsed and just @@ -315,7 +315,7 @@ Response not included in text but tested for completeness sake. //// [[hierarchical-nested-inner-hits]] -==== Hierarchical levels of nested object fields and inner hits. +===== Hierarchical levels of nested object fields and inner hits. If a mapping has multiple levels of hierarchical nested object fields each level can be accessed via dot notated path. 
For example if there is a `comments` nested field that contains a `votes` nested field and votes should directly be returned @@ -437,7 +437,7 @@ Which would look like: This indirect referencing is only supported for nested inner hits. [[parent-child-inner-hits]] -==== Parent/child inner hits +===== Parent/child inner hits The parent/child `inner_hits` can be used to include parent or child: diff --git a/docs/reference/search/request/min-score.asciidoc b/docs/reference/search/request/min-score.asciidoc index d9dbef99ddfc5..1a03d6d3ee483 100644 --- a/docs/reference/search/request/min-score.asciidoc +++ b/docs/reference/search/request/min-score.asciidoc @@ -1,5 +1,5 @@ -[[search-request-min-score]] -=== min_score +[[request-body-search-min-score]] +==== min_score Exclude documents which have a `_score` less than the minimum specified in `min_score`: diff --git a/docs/reference/search/request/named-queries-and-filters.asciidoc b/docs/reference/search/request/named-queries-and-filters.asciidoc index 0fb602539386e..c850210f03046 100644 --- a/docs/reference/search/request/named-queries-and-filters.asciidoc +++ b/docs/reference/search/request/named-queries-and-filters.asciidoc @@ -1,5 +1,5 @@ -[[search-request-named-queries-and-filters]] -=== Named Queries +[[request-body-search-queries-and-filters]] +==== Named Queries Each filter and query can accept a `_name` in its top level definition. diff --git a/docs/reference/search/request/post-filter.asciidoc b/docs/reference/search/request/post-filter.asciidoc index c46cdb1e52286..478b4082e04c3 100644 --- a/docs/reference/search/request/post-filter.asciidoc +++ b/docs/reference/search/request/post-filter.asciidoc @@ -1,5 +1,5 @@ -[[search-request-post-filter]] -=== Post filter +[[request-body-search-post-filter]] +==== Post filter The `post_filter` is applied to the search `hits` at the very end of a search request, after aggregations have already been calculated. Its purpose is diff --git a/docs/reference/search/request/preference.asciidoc b/docs/reference/search/request/preference.asciidoc index 7412f04844c08..12bcca51c1777 100644 --- a/docs/reference/search/request/preference.asciidoc +++ b/docs/reference/search/request/preference.asciidoc @@ -1,5 +1,5 @@ -[[search-request-preference]] -=== Preference +[[request-body-search-preference]] +==== Preference Controls a `preference` of the shard copies on which to execute the search. By default, Elasticsearch selects from the available shard copies in an diff --git a/docs/reference/search/request/query.asciidoc b/docs/reference/search/request/query.asciidoc index fa06d0d9bb40f..d114bf93791eb 100644 --- a/docs/reference/search/request/query.asciidoc +++ b/docs/reference/search/request/query.asciidoc @@ -1,5 +1,5 @@ -[[search-request-query]] -=== Query +[[request-body-search-query]] +==== Query The query element within the search request body allows to define a query using the <>. 
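The query.asciidoc hunk above is header-only; the element it renames simply wraps the standard Query DSL, e.g. this sketch:

[source,js]
--------------------------------------------------
GET /_search
{
  "query" : {
    "term" : { "user" : "kimchy" }
  }
}
--------------------------------------------------
// NOTCONSOLE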
diff --git a/docs/reference/search/request/rescore.asciidoc b/docs/reference/search/request/rescore.asciidoc index c2277aada3f52..64c3896ee3da2 100644 --- a/docs/reference/search/request/rescore.asciidoc +++ b/docs/reference/search/request/rescore.asciidoc @@ -1,10 +1,10 @@ -[[search-request-rescore]] -=== Rescoring +[[request-body-search-rescore]] +==== Rescoring Rescoring can help to improve precision by reordering just the top (eg 100 - 500) documents returned by the -<> and -<> phases, using a +<> and +<> phases, using a secondary (usually more costly) algorithm, instead of applying the costly algorithm to all documents in the index. @@ -15,7 +15,7 @@ Currently the rescore API has only one implementation: the query rescorer, which uses a query to tweak the scoring. In the future, alternative rescorers may be made available, for example, a pair-wise rescorer. -NOTE: An error will be thrown if an explicit <> +NOTE: An error will be thrown if an explicit <> (other than `_score` in descending order) is provided with a `rescore` query. NOTE: when exposing pagination to your users, you should not change @@ -23,11 +23,11 @@ NOTE: when exposing pagination to your users, you should not change `from` values) since that can alter the top hits causing results to confusingly shift as the user steps through pages. -==== Query rescorer +===== Query rescorer The query rescorer executes a second query only on the Top-K results -returned by the <> and -<> phases. The +returned by the <> and +<> phases. The number of docs which will be examined on each shard can be controlled by the `window_size` parameter, which defaults to 10. @@ -83,7 +83,7 @@ for <> rescores. |`min` |Take the min of the original score and the rescore query score. |======================================================================= -==== Multiple Rescores +===== Multiple Rescores It is also possible to execute multiple rescores in sequence: diff --git a/docs/reference/search/request/script-fields.asciidoc b/docs/reference/search/request/script-fields.asciidoc index 1bd61e0048182..090e403c57856 100644 --- a/docs/reference/search/request/script-fields.asciidoc +++ b/docs/reference/search/request/script-fields.asciidoc @@ -1,5 +1,5 @@ -[[search-request-script-fields]] -=== Script Fields +[[request-body-search-script-fields]] +==== Script Fields Allows to return a <> (based on different fields) for each hit, for example: diff --git a/docs/reference/search/request/scroll.asciidoc b/docs/reference/search/request/scroll.asciidoc index 3503ad2c9c826..bb5ad2888647c 100644 --- a/docs/reference/search/request/scroll.asciidoc +++ b/docs/reference/search/request/scroll.asciidoc @@ -1,5 +1,5 @@ -[[search-request-scroll]] -=== Scroll +[[request-body-search-scroll]] +==== Scroll While a `search` request returns a single ``page'' of results, the `scroll` API can be used to retrieve large numbers of results (or even all results) @@ -101,7 +101,7 @@ GET /_search?scroll=1m // TEST[setup:twitter] [[scroll-search-context]] -==== Keeping the search context alive +===== Keeping the search context alive A scroll returns all the documents which matched the search at the time of the initial search request. It ignores any subsequent changes to these documents. @@ -148,7 +148,7 @@ GET /_nodes/stats/indices/search --------------------------------------- // CONSOLE -==== Clear scroll API +===== Clear scroll API Search context are automatically removed when the `scroll` timeout has been exceeded. 
However keeping scrolls open has a cost, as discussed in the @@ -200,7 +200,7 @@ DELETE /_search/scroll/DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMN // TEST[catch:missing] [[sliced-scroll]] -==== Sliced Scroll +===== Sliced Scroll For scroll queries that return a lot of documents it is possible to split the scroll in multiple slices which can be consumed independently: diff --git a/docs/reference/search/request/search-after.asciidoc b/docs/reference/search/request/search-after.asciidoc index 53d25c613b928..e940b0688cc24 100644 --- a/docs/reference/search/request/search-after.asciidoc +++ b/docs/reference/search/request/search-after.asciidoc @@ -1,9 +1,9 @@ -[[search-request-search-after]] -=== Search After +[[request-body-search-search-after]] +==== Search After Pagination of results can be done by using the `from` and `size` but the cost becomes prohibitive when the deep pagination is reached. The `index.max_result_window` which defaults to 10,000 is a safeguard, search requests take heap memory and time proportional to `from + size`. -The <> api is recommended for efficient deep scrolling but scroll contexts are costly and it is not +The <> api is recommended for efficient deep scrolling but scroll contexts are costly and it is not recommended to use it for real time user requests. The `search_after` parameter circumvents this problem by providing a live cursor. The idea is to use the results from the previous page to help the retrieval of the next page. diff --git a/docs/reference/search/request/search-type.asciidoc b/docs/reference/search/request/search-type.asciidoc index 7cac034f29c25..684d84358896a 100644 --- a/docs/reference/search/request/search-type.asciidoc +++ b/docs/reference/search/request/search-type.asciidoc @@ -1,5 +1,5 @@ -[[search-request-search-type]] -=== Search Type +[[request-body-search-search-type]] +==== Search Type There are different execution paths that can be done when executing a distributed search. The distributed search operation needs to be @@ -35,7 +35,7 @@ by setting the *search_type* parameter in the query string. The types are: [[query-then-fetch]] -==== Query Then Fetch +===== Query Then Fetch Parameter value: *query_then_fetch*. @@ -54,7 +54,7 @@ NOTE: This is the default setting, if you do not specify a `search_type` in your request. [[dfs-query-then-fetch]] -==== Dfs, Query Then Fetch +===== Dfs, Query Then Fetch Parameter value: *dfs_query_then_fetch*. diff --git a/docs/reference/search/request/seq-no.asciidoc b/docs/reference/search/request/seq-no.asciidoc index 0ab7bec4487d9..5bfc328a30906 100644 --- a/docs/reference/search/request/seq-no.asciidoc +++ b/docs/reference/search/request/seq-no.asciidoc @@ -1,5 +1,5 @@ -[[search-request-seq-no-primary-term]] -=== Sequence Numbers and Primary Term +[[request-body-search-seq-no-primary-term]] +==== Sequence Numbers and Primary Term Returns the sequence number and primary term of the last modification to each search hit. See <> for more details. diff --git a/docs/reference/search/request/sort.asciidoc b/docs/reference/search/request/sort.asciidoc index ccbc3da6e063b..0bd27560b987b 100644 --- a/docs/reference/search/request/sort.asciidoc +++ b/docs/reference/search/request/sort.asciidoc @@ -1,5 +1,5 @@ -[[search-request-sort]] -=== Sort +[[request-body-search-sort]] +==== Sort Allows you to add one or more sorts on specific fields. Each sort can be reversed as well. 
The sort is defined on a per field level, with special @@ -48,14 +48,14 @@ GET /my_index/_search NOTE: `_doc` has no real use-case besides being the most efficient sort order. So if you don't care about the order in which documents are returned, then you -should sort by `_doc`. This especially helps when <>. +should sort by `_doc`. This especially helps when <>. -==== Sort Values +===== Sort Values The sort values for each document returned are also returned as part of the response. -==== Sort Order +===== Sort Order The `order` option can have the following values: @@ -66,7 +66,7 @@ The `order` option can have the following values: The order defaults to `desc` when sorting on the `_score`, and defaults to `asc` when sorting on anything else. -==== Sort mode option +===== Sort mode option Elasticsearch supports sorting by array or multi-valued fields. The `mode` option controls what array value is picked for sorting the document it belongs @@ -86,7 +86,7 @@ The default sort mode in the ascending sort order is `min` -- the lowest value is picked. The default sort mode in the descending order is `max` -- the highest value is picked. -===== Sort mode example usage +====== Sort mode example usage In the example below the field price has multiple prices per document. In this case the result hits will be sorted by price ascending based on @@ -112,7 +112,7 @@ POST /_search -------------------------------------------------- // CONSOLE -==== Sorting numeric fields +===== Sorting numeric fields For numeric fields it is also possible to cast the values from one type to another using the `numeric_type` option. @@ -237,7 +237,7 @@ To avoid overflow, the conversion to `date_nanos` cannot be applied on dates bef 1970 and after 2262 as nanoseconds are represented as longs. [[nested-sorting]] -==== Sorting within nested objects. +===== Sorting within nested objects. Elasticsearch also supports sorting by fields that are inside one or more nested objects. The sorting by nested @@ -263,7 +263,7 @@ field support has a `nested` sort option with the following properties: NOTE: Elasticsearch will throw an error if a nested field is defined in a sort without a `nested` context. -===== Nested sorting examples +====== Nested sorting examples In the below example `offer` is a field of type `nested`. The nested `path` needs to be specified; otherwise, Elasticsearch doesn't know on what nested level sort values need to be captured. @@ -343,7 +343,7 @@ POST /_search Nested sorting is also supported when sorting by scripts and sorting by geo distance. -==== Missing Values +===== Missing Values The `missing` parameter specifies how docs which are missing the sort field should be treated: The `missing` value can be @@ -370,7 +370,7 @@ GET /_search NOTE: If a nested inner object doesn't match with the `nested.filter` then a missing value is used. -==== Ignoring Unmapped Fields +===== Ignoring Unmapped Fields By default, the search request will fail if there is no mapping associated with a field. The `unmapped_type` option allows you to ignore @@ -397,7 +397,7 @@ then Elasticsearch will handle it as if there was a mapping of type `long`, with all documents in this index having no value for this field. [[geo-sorting]] -==== Geo Distance Sorting +===== Geo Distance Sorting Allow to sort by `_geo_distance`. Here is an example, assuming `pin.location` is a field of type `geo_point`: @@ -453,7 +453,7 @@ have values for the field that is used for distance computation. 
The following formats are supported in providing the coordinates: -===== Lat Lon as Properties +====== Lat Lon as Properties [source,js] -------------------------------------------------- @@ -478,7 +478,7 @@ GET /_search -------------------------------------------------- // CONSOLE -===== Lat Lon as String +====== Lat Lon as String Format in `lat,lon`. @@ -502,7 +502,7 @@ GET /_search -------------------------------------------------- // CONSOLE -===== Geohash +====== Geohash [source,js] -------------------------------------------------- @@ -524,7 +524,7 @@ GET /_search -------------------------------------------------- // CONSOLE -===== Lat Lon as Array +====== Lat Lon as Array Format in `[lon, lat]`, note, the order of lon/lat here in order to conform with http://geojson.org/[GeoJSON]. @@ -550,7 +550,7 @@ GET /_search // CONSOLE -==== Multiple reference points +===== Multiple reference points Multiple geo points can be passed as an array containing any `geo_point` format, for example @@ -580,7 +580,7 @@ The final distance for a document will then be `min`/`max`/`avg` (defined via `m -==== Script Based Sorting +===== Script Based Sorting Allow to sort based on custom scripts, here is an example: @@ -609,7 +609,7 @@ GET /_search // CONSOLE -==== Track Scores +===== Track Scores When sorting on a field, scores are not computed. By setting `track_scores` to true, scores will still be computed and tracked. @@ -631,7 +631,7 @@ GET /_search -------------------------------------------------- // CONSOLE -==== Memory Considerations +===== Memory Considerations When sorting, the relevant sorted field values are loaded into memory. This means that per shard, there should be enough memory to contain diff --git a/docs/reference/search/request/source-filtering.asciidoc b/docs/reference/search/request/source-filtering.asciidoc index 483d3311d7097..467c31356b719 100644 --- a/docs/reference/search/request/source-filtering.asciidoc +++ b/docs/reference/search/request/source-filtering.asciidoc @@ -1,5 +1,5 @@ -[[search-request-source-filtering]] -=== Source filtering +[[request-body-search-source-filtering]] +==== Source filtering Allows to control how the `_source` field is returned with every hit. diff --git a/docs/reference/search/request/stored-fields.asciidoc b/docs/reference/search/request/stored-fields.asciidoc index b55e0fce45757..550c3b81af4cf 100644 --- a/docs/reference/search/request/stored-fields.asciidoc +++ b/docs/reference/search/request/stored-fields.asciidoc @@ -1,9 +1,9 @@ -[[search-request-stored-fields]] -=== Stored Fields +[[request-body-search-stored-fields]] +==== Stored Fields WARNING: The `stored_fields` parameter is about fields that are explicitly marked as stored in the mapping, which is off by default and generally not recommended. -Use <> instead to select +Use <> instead to select subsets of the original source document to be returned. Allows to selectively load specific stored fields for each document represented @@ -52,9 +52,9 @@ things like `_source.obj1.field1` can be used, though not recommended, as NOTE: On its own, `stored_fields` cannot be used to load fields in nested objects -- if a field contains a nested object in its path, then no data will be returned for that stored field. To access nested fields, `stored_fields` -must be used within an <> block. +must be used within an <> block. 
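As a sketch of the `stored_fields` parameter described above; the field names are assumptions and would need `store` enabled in the mapping:

[source,js]
--------------------------------------------------
GET /_search
{
    "stored_fields" : ["user", "postDate"], <1>
    "query" : {
        "match_all" : {}
    }
}
--------------------------------------------------
// NOTCONSOLE
<1> Hypothetical fields mapped with `"store": true`; fields that are not stored return no values.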
-==== Disable stored fields entirely +===== Disable stored fields entirely To disable the stored fields (and metadata fields) entirely use: `_none_`: @@ -70,5 +70,5 @@ GET /_search -------------------------------------------------- // CONSOLE -NOTE: <> and <> parameters cannot be activated if `_none_` is used. +NOTE: <> and <> parameters cannot be activated if `_none_` is used. diff --git a/docs/reference/search/request/track-total-hits.asciidoc b/docs/reference/search/request/track-total-hits.asciidoc index 210f6321816e0..1e9ede3ae94d7 100644 --- a/docs/reference/search/request/track-total-hits.asciidoc +++ b/docs/reference/search/request/track-total-hits.asciidoc @@ -1,5 +1,5 @@ -[[search-request-track-total-hits]] -=== Track total hits +[[request-body-search-track-total-hits]] +==== Track total hits Generally the total hit count can't be computed accurately without visiting all matches, which is costly for queries that match lots of documents. The diff --git a/docs/reference/search/request/version.asciidoc b/docs/reference/search/request/version.asciidoc index 57c6ce27feb91..527481348e332 100644 --- a/docs/reference/search/request/version.asciidoc +++ b/docs/reference/search/request/version.asciidoc @@ -1,5 +1,5 @@ -[[search-request-version]] -=== Version +[[request-body-search-version]] +==== Version Returns a version for each search hit. diff --git a/docs/reference/search/search-shards.asciidoc b/docs/reference/search/search-shards.asciidoc index 90ee35afa6172..9912a791af138 100644 --- a/docs/reference/search/search-shards.asciidoc +++ b/docs/reference/search/search-shards.asciidoc @@ -1,5 +1,5 @@ [[search-shards]] -== Search Shards API +=== Search Shards API The search shards api returns the indices and shards that a search request would be executed against. This can give useful feedback for working out issues or @@ -9,7 +9,7 @@ are used, the filter is returned as part of the `indices` section [5.1.0] Added The `index` may be a single value, or comma-separated. [float] -=== Usage +==== Usage Full example: @@ -149,7 +149,7 @@ This time the search will only be executed against two of the shards, because routing values have been specified. [float] -=== All parameters: +==== All parameters: [horizontal] `routing`:: @@ -159,7 +159,7 @@ routing values have been specified. `preference`:: Controls a `preference` of which shard replicas to execute the search request on. By default, the operation is randomized between the shard - replicas. See the link:search-request-preference.html[preference] + replicas. See the link:search-request-body.html#request-body-search-preference[preference] documentation for a list of all acceptable values. `local`:: diff --git a/docs/reference/search/search-template.asciidoc b/docs/reference/search/search-template.asciidoc index 6b96564e9c0fe..f68a71c10c2c1 100644 --- a/docs/reference/search/search-template.asciidoc +++ b/docs/reference/search/search-template.asciidoc @@ -1,5 +1,5 @@ [[search-template]] -== Search Template +=== Search Template The `/_search/template` endpoint allows to use the mustache language to pre render search requests, before they are executed and fill existing templates with template parameters. @@ -596,7 +596,7 @@ GET _search/template // TEST[catch:missing] [[multi-search-template]] -== Multi Search Template +=== Multi Search Template The multi search template API allows to execute several search template requests within the same API using the `_msearch/template` endpoint. 
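A sketch of the newline-delimited body such a request takes; the template id, index name, and parameters are assumptions:

[source,js]
--------------------------------------------------
GET _msearch/template
{"index" : "twitter"}
{"id" : "my_stored_template", "params" : {"query_string" : "some message"}}
{"index" : "twitter"}
{"source" : "{ \"query\": { \"match\": { \"message\": \"{{query_string}}\" } } }", "params" : {"query_string" : "some other message"}}
--------------------------------------------------
// NOTCONSOLE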
diff --git a/docs/reference/search/search.asciidoc b/docs/reference/search/search.asciidoc index f72eb7d68227e..3fbeeaa29e4a9 100644 --- a/docs/reference/search/search.asciidoc +++ b/docs/reference/search/search.asciidoc @@ -1,5 +1,5 @@ [[search-search]] -== Search +=== Search The search API allows you to execute a search query and get back search hits that match the query. The query can either be provided using a simple @@ -7,7 +7,7 @@ that match the query. The query can either be provided using a simple <>. ["float",id="search-multi-index"] -=== Multi-Index +==== Multi-Index All search APIs can be applied across multiple indices with support for the <>. For @@ -41,5 +41,5 @@ GET /_all/_search?q=tag:wow [float] [[search-partial-responses]] -=== Partial responses +==== Partial responses To ensure fast responses, the search API will respond with partial results if one or more shards fail. See <> for more information. \ No newline at end of file diff --git a/docs/reference/search/suggesters.asciidoc b/docs/reference/search/suggesters.asciidoc index a84afeea52c2b..78aa5d3628564 100644 --- a/docs/reference/search/suggesters.asciidoc +++ b/docs/reference/search/suggesters.asciidoc @@ -1,5 +1,5 @@ [[search-suggesters]] -== Suggesters +=== Suggesters The suggest feature suggests similar looking terms based on a provided text by using a suggester. Parts of the suggest feature are still under @@ -109,7 +109,7 @@ term suggester's score is based on the edit distance. [float] [[global-suggest]] -=== Global suggest text +==== Global suggest text To avoid repetition of the suggest text, it is possible to define a global text. In the example below the suggest text is defined globally diff --git a/docs/reference/search/suggesters/completion-suggest.asciidoc b/docs/reference/search/suggesters/completion-suggest.asciidoc index 0ed0601b086ac..1090eb6652dcc 100644 --- a/docs/reference/search/suggesters/completion-suggest.asciidoc +++ b/docs/reference/search/suggesters/completion-suggest.asciidoc @@ -1,5 +1,5 @@ -[[search-suggesters-completion]] -=== Completion Suggester +[[completion-suggester]] +==== Completion Suggester NOTE: In order to understand the format of suggestions, please read the <> page first. For more flexible @@ -19,7 +19,7 @@ The suggester uses data structures that enable fast lookups, but are costly to build and are stored in-memory. [[completion-suggester-mapping]] -==== Mapping +===== Mapping To use this feature, specify a special mapping for this field, which indexes the field values for fast completions. @@ -74,7 +74,7 @@ Mapping supports the following parameters: than a handful of characters. [[indexing]] -==== Indexing +===== Indexing You index suggestions like any other field. A suggestion is made of an `input` and an optional `weight` attribute. An `input` is the expected @@ -141,7 +141,7 @@ PUT music/_doc/1?refresh // TEST[continued] [[querying]] -==== Querying +===== Querying Suggesting works as usual, except that you have to specify the suggest type as `completion`. Suggestions are near real-time, which means @@ -214,7 +214,7 @@ The configured weight for a suggestion is returned as `_score`. The return the full document `_source` by default. The size of the `_source` can impact performance due to disk fetch and network transport overhead. To save some network overhead, filter out unnecessary fields from the `_source` -using <> to minimize +using <> to minimize `_source` size. 
Note that the _suggest endpoint doesn't support source filtering but using suggest on the `_search` endpoint does: @@ -291,7 +291,7 @@ The basic completion suggester query supports the following parameters: `skip_duplicates`:: Whether duplicate suggestions should be filtered out (defaults to `false`). NOTE: The completion suggester considers all documents in the index. -See <> for an explanation of how to query a subset of +See <> for an explanation of how to query a subset of documents instead. NOTE: In case of completion queries spanning more than one shard, the suggest @@ -304,7 +304,7 @@ shard size, it is still recommended to break index into multiple shards instead of optimizing for completion performance. [[skip_duplicates]] -==== Skip duplicate suggestions +===== Skip duplicate suggestions Queries can return duplicate suggestions coming from different documents. It is possible to modify this behavior by setting `skip_duplicates` to true. @@ -331,7 +331,7 @@ WARNING: When set to true, this option can slow down search because more suggest need to be visited to find the top N. [[fuzzy]] -==== Fuzzy queries +===== Fuzzy queries The completion suggester also supports fuzzy queries -- this means you can have a typo in your search and still get results back. @@ -390,7 +390,7 @@ NOTE: If you want to stick with the default values, but or `fuzzy: true`. [[regex]] -==== Regex queries +===== Regex queries The completion suggester also supports regex queries meaning you can express a prefix as a regular expression diff --git a/docs/reference/search/suggesters/context-suggest.asciidoc b/docs/reference/search/suggesters/context-suggest.asciidoc index 77dd5745e159b..f4ca6e91cd9ae 100644 --- a/docs/reference/search/suggesters/context-suggest.asciidoc +++ b/docs/reference/search/suggesters/context-suggest.asciidoc @@ -1,5 +1,5 @@ -[[suggester-context]] -=== Context Suggester +[[context-suggester]] +==== Context Suggester The completion suggester considers all documents in the index, but it is often desirable to serve suggestions filtered and/or boosted by some criteria. @@ -87,7 +87,7 @@ is entirely heap resident, you can monitor the completion field index size using [[suggester-context-category]] [float] -==== Category Context +===== Category Context The `category` context allows you to associate one or more categories with suggestions at index time. At query time, suggestions can be filtered and boosted by their associated categories. @@ -131,7 +131,7 @@ of categories. [float] -===== Category Query +====== Category Query Suggestions can be filtered by one or more categories. The following filters suggestions by multiple categories: @@ -220,7 +220,7 @@ maximum score produced by any matching contexts. [[suggester-context-geo]] [float] -==== Geo location Context +===== Geo location Context A `geo` context allows you to associate one or more geo points or geohashes with suggestions at index time. At query time, suggestions can be filtered and boosted if they are within @@ -229,7 +229,7 @@ a certain distance of a specified geo location. Internally, geo points are encoded as geohashes with the specified precision. [float] -===== Geo Mapping +====== Geo Mapping In addition to the `path` setting, `geo` context mapping accepts the following settings: @@ -243,7 +243,7 @@ NOTE: The index time `precision` setting sets the maximum geohash precision that can be used at query time. 
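Putting the mapping settings above together, a completion field with a `geo` context might be defined as in this sketch; the index name, field names, and `precision` value are assumptions:

[source,js]
--------------------------------------------------
PUT place_example
{
    "mappings" : {
        "properties" : {
            "suggest" : {
                "type" : "completion",
                "contexts" : [
                    {
                        "name" : "location",
                        "type" : "geo",
                        "precision" : 4, <1>
                        "path" : "loc" <2>
                    }
                ]
            },
            "loc" : { "type" : "geo_point" }
        }
    }
}
--------------------------------------------------
// NOTCONSOLE
<1> Sets the maximum geohash precision usable at query time, as noted above.
<2> Hypothetical `geo_point` field from which geo contexts are read at index time.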
[float] -===== Indexing geo contexts +====== Indexing geo contexts `geo` contexts can be explicitly set with suggestions or be indexed from a geo point field in the document via the `path` parameter, similar to `category` contexts. Associating multiple geo location context @@ -274,7 +274,7 @@ PUT place/_doc/1 // CONSOLE [float] -===== Geo location Query +====== Geo location Query Suggestions can be filtered and boosted with respect to how close they are to one or more geo points. The following filters suggestions that fall within the area represented by diff --git a/docs/reference/search/suggesters/misc.asciidoc b/docs/reference/search/suggesters/misc.asciidoc index 6866df24c0960..1cd6e6fdbfc52 100644 --- a/docs/reference/search/suggesters/misc.asciidoc +++ b/docs/reference/search/suggesters/misc.asciidoc @@ -1,5 +1,5 @@ -[[returning-suggesters-type]] -=== Returning the type of the suggester +[[return-suggesters-type]] +==== Returning the type of the suggester Sometimes you need to know the exact type of a suggester in order to parse its results. The `typed_keys` parameter can be used to change the suggester's name in the response so that it will be prefixed by its type. diff --git a/docs/reference/search/suggesters/phrase-suggest.asciidoc b/docs/reference/search/suggesters/phrase-suggest.asciidoc index 2d0e3e218e62c..46dfec5db1c1a 100644 --- a/docs/reference/search/suggesters/phrase-suggest.asciidoc +++ b/docs/reference/search/suggesters/phrase-suggest.asciidoc @@ -1,5 +1,5 @@ -[[search-suggesters-phrase]] -=== Phrase Suggester +[[phrase-suggester]] +==== Phrase Suggester NOTE: In order to understand the format of suggestions, please read the <> page first. @@ -15,7 +15,7 @@ based on `ngram-language` models. In practice this suggester will be able to make better decisions about which tokens to pick based on co-occurrence and frequencies. -==== API Example +===== API Example In general the `phrase` suggester requires special mapping up front to work. The `phrase` suggester examples on this page need the following mapping to @@ -33,12 +33,12 @@ PUT test "trigram": { "type": "custom", "tokenizer": "standard", - "filter": ["shingle"] + "filter": ["lowercase","shingle"] }, "reverse": { "type": "custom", "tokenizer": "standard", - "filter": ["reverse"] + "filter": ["lowercase","reverse"] } }, "filter": { @@ -135,7 +135,7 @@ The response contains suggestions scored by the most likely spell correction fir // TESTRESPONSE[s/"hits": .../"hits": "$body.hits",/] // TESTRESPONSE[s/"took": 3,/"took": "$body.took",/] -==== Basic Phrase suggest API parameters +===== Basic Phrase suggest API parameters [horizontal] `field`:: @@ -263,7 +263,7 @@ POST test/_search option indicating whether the generated phrase matched any document. -==== Smoothing Models +===== Smoothing Models The `phrase` suggester supports multiple smoothing models to balance weight between infrequent grams (grams (shingles) are not existing in @@ -313,7 +313,7 @@ POST test/_search -------------------------------------------------- // CONSOLE -==== Candidate Generators +===== Candidate Generators The `phrase` suggester uses candidate generators to produce a list of possible terms per term in the given text. A single candidate generator @@ -326,7 +326,7 @@ Currently only one type of candidate generator is supported, the under the key `direct_generator`; each of the generators in the list is called per term in the original text. 
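As a concrete sketch before the parameter reference that follows, here is a `phrase` suggestion with one explicit generator; the `title.trigram` field follows the mapping shown earlier on this page:

[source,js]
--------------------------------------------------
POST test/_search
{
  "suggest" : {
    "text" : "noble prize",
    "simple_phrase" : {
      "phrase" : {
        "field" : "title.trigram",
        "direct_generator" : [ {
          "field" : "title.trigram",
          "suggest_mode" : "always" <1>
        } ]
      }
    }
  }
}
--------------------------------------------------
// NOTCONSOLE
<1> A generator-level option; the full list of supported options follows below.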
-==== Direct Generators +===== Direct Generators The direct generators support the following parameters: diff --git a/docs/reference/search/suggesters/term-suggest.asciidoc b/docs/reference/search/suggesters/term-suggest.asciidoc index f9dd0c9133542..3fe3ec8be4eac 100644 --- a/docs/reference/search/suggesters/term-suggest.asciidoc +++ b/docs/reference/search/suggesters/term-suggest.asciidoc @@ -1,5 +1,5 @@ -[[search-suggesters-term]] -=== Term suggester +[[term-suggester]] +==== Term suggester NOTE: In order to understand the format of suggestions, please read the <> page first. @@ -9,7 +9,7 @@ suggest text is analyzed before terms are suggested. The suggested terms are provided per analyzed suggest text token. The `term` suggester doesn't take the query into account that is part of request. -==== Common suggest options: +===== Common suggest options: [horizontal] `text`:: @@ -50,7 +50,7 @@ doesn't take the query into account that is part of request. ** `always`: Suggest any matching suggestions based on terms in the suggest text. -==== Other term suggest options: +===== Other term suggest options: [horizontal] `lowercase_terms`:: diff --git a/docs/reference/search/uri-request.asciidoc b/docs/reference/search/uri-request.asciidoc index 7bf769c6d7f47..d4f058f3c4d74 100644 --- a/docs/reference/search/uri-request.asciidoc +++ b/docs/reference/search/uri-request.asciidoc @@ -1,5 +1,5 @@ [[search-uri-request]] -== URI Search +=== URI Search A search request can be executed purely using a URI by providing request parameters. Not all search options are exposed when executing a search @@ -52,7 +52,7 @@ And here is a sample response: // TESTRESPONSE[s/"took": 62/"took": "$body.took"/] [float] -=== Parameters +==== Parameters The parameters allowed in the URI are: @@ -86,7 +86,7 @@ providing text to a numeric field) to be ignored. Defaults to false. hits was computed. |`_source`|Set to `false` to disable retrieval of the `_source` field. You can also retrieve -part of the document by using `_source_includes` & `_source_excludes` (see the <> +part of the document by using `_source_includes` & `_source_excludes` (see the <> documentation for more details) |`stored_fields` |The selective stored fields of the document to return for each hit, @@ -105,7 +105,7 @@ scores and return them as part of each hit. of the total number of hits that match the query. It also accepts an integer which in this case represents the number of hits to count accurately. -(See the <> documentation +(See the <> documentation for more details). |`timeout` |A search timeout, bounding the search request to be executed @@ -125,7 +125,7 @@ Defaults to no terminate_after. |`search_type` |The type of the search operation to perform. Can be `dfs_query_then_fetch` or `query_then_fetch`. Defaults to `query_then_fetch`. See -<> for +<> for more details on the different types of search that can be performed. |`allow_partial_search_results` |Set to `false` to return an overall failure if the request would produce diff --git a/docs/reference/search/validate.asciidoc b/docs/reference/search/validate.asciidoc index 47eab847336ce..6e27059837071 100644 --- a/docs/reference/search/validate.asciidoc +++ b/docs/reference/search/validate.asciidoc @@ -1,5 +1,5 @@ [[search-validate]] -== Validate API +=== Validate API The validate API allows a user to validate a potentially expensive query without executing it. 
We'll use the following test data to explain _validate: diff --git a/docs/reference/security/securing-communications/securing-elasticsearch.asciidoc b/docs/reference/security/securing-communications/securing-elasticsearch.asciidoc index a24e272dd8937..635c8a1450f5d 100644 --- a/docs/reference/security/securing-communications/securing-elasticsearch.asciidoc +++ b/docs/reference/security/securing-communications/securing-elasticsearch.asciidoc @@ -31,17 +31,12 @@ information, see <>. For more information about encrypting communications across the Elastic Stack, see {stack-ov}/encrypting-communications.html[Encrypting Communications]. -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/security/securing-communications/node-certificates.asciidoc include::node-certificates.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/security/securing-communications/tls-transport.asciidoc include::tls-transport.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/security/securing-communications/tls-http.asciidoc include::tls-http.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/security/securing-communications/tls-ad.asciidoc include::tls-ad.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/security/securing-communications/tls-ldap.asciidoc include::tls-ldap.asciidoc[] \ No newline at end of file diff --git a/docs/reference/settings/data-frames-settings.asciidoc b/docs/reference/settings/data-frames-settings.asciidoc new file mode 100644 index 0000000000000..e550063f7b459 --- /dev/null +++ b/docs/reference/settings/data-frames-settings.asciidoc @@ -0,0 +1,40 @@ + +[role="xpack"] +[[data-frames-settings]] +=== {dataframe-transforms-cap} settings in Elasticsearch +[subs="attributes"] +++++ +{dataframe-transforms-cap} settings +++++ + +You do not need to configure any settings to use {dataframe-transforms}. It is enabled by default. + +All of these settings can be added to the `elasticsearch.yml` configuration file. +The dynamic settings can also be updated across a cluster with the +<>. + +TIP: Dynamic settings take precedence over settings in the `elasticsearch.yml` +file. + +[float] +[[general-data-frames-settings]] +==== General {dataframe-transforms} settings + +`xpack.data_frame.enabled`:: +Set to `true` (default) to enable {dataframe-transforms} on the node. + ++ +If set to `false` in `elasticsearch.yml`, the {dataframe-transform} APIs are disabled on the node. +Therefore the node cannot start or administrate transforms or receive transport (internal) +communication requests related to {dataframe-transform} APIs. ++ +IMPORTANT: If you want to use {dataframe-transform} features in your cluster, you must have +`xpack.data_frame.enabled` set to `true` on all master-eligible nodes. This is the +default behavior. + +`xpack.data_frame.num_transform_failure_retries` (<>):: +The number of times that a {dataframe-transform} retries when it experiences a +non-fatal error. Once the number of retries is exhausted, the {dataframe-transform} +task will be marked as `failed`. The default value is `10` with a valid minimum of `0` +and maximum of `100`. +If a {dataframe-transform} is already running, it will have to be restarted +to use the changed setting. 
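Because `xpack.data_frame.num_transform_failure_retries` is dynamic, it can be adjusted at runtime with the cluster update settings API; the value here is only an example:

[source,js]
--------------------------------------------------
PUT _cluster/settings
{
    "transient" : {
        "xpack.data_frame.num_transform_failure_retries" : 5
    }
}
--------------------------------------------------
// NOTCONSOLE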
diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index 98a0b9b30c186..ee48257565ff5 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -128,6 +128,18 @@ level security]. Set to `false` to prevent document and field level security from being configured. Defaults to `true`. +`xpack.security.dls.bitset.cache.ttl`:: +The time-to-live for cached `BitSet` entries for document level security. +Document level security queries may depend on Lucene BitSet objects, and these are +automatically cached to improve performance. Defaults to expire entries that are +unused for `168h` (7 days). + +`xpack.security.dls.bitset.cache.size`:: +The maximum memory usage of cached `BitSet` entries for document level security. +Document level security queries may depend on Lucene BitSet objects, and these are +automatically cached to improve performance. Defaults to `50mb`, after which +least-recently-used entries will be evicted. + [float] [[token-service-settings]] ==== Token service settings diff --git a/docs/reference/setup.asciidoc b/docs/reference/setup.asciidoc index 8fd5a44443ddc..6d8504f6b5755 100644 --- a/docs/reference/setup.asciidoc +++ b/docs/reference/setup.asciidoc @@ -51,6 +51,8 @@ include::settings/audit-settings.asciidoc[] include::settings/ccr-settings.asciidoc[] +include::settings/data-frames-settings.asciidoc[] + include::settings/ilm-settings.asciidoc[] include::settings/license-settings.asciidoc[] diff --git a/docs/reference/sql/endpoints/rest.asciidoc b/docs/reference/sql/endpoints/rest.asciidoc index 8140fe162d9de..4fb644a62ba53 100644 --- a/docs/reference/sql/endpoints/rest.asciidoc +++ b/docs/reference/sql/endpoints/rest.asciidoc @@ -310,7 +310,7 @@ Which looks like: Note that the `columns` object is only part of the first page. You've reached the last page when there is no `cursor` returned -in the results. Like Elasticsearch's <>, +in the results. Like Elasticsearch's <>, SQL may keep state in Elasticsearch to support the cursor. Unlike scroll, receiving the last page is enough to guarantee that the Elasticsearch state is cleared. diff --git a/docs/reference/sql/endpoints/translate.asciidoc b/docs/reference/sql/endpoints/translate.asciidoc index 6a347ff614af7..ca6228ac66f02 100644 --- a/docs/reference/sql/endpoints/translate.asciidoc +++ b/docs/reference/sql/endpoints/translate.asciidoc @@ -53,7 +53,7 @@ Which returns: // TESTRESPONSE Which is the request that SQL will run to provide the results. -In this case, SQL will use the <> +In this case, SQL will use the <> API. If the result contained an aggregation then SQL would use the normal <> API. diff --git a/docs/reference/sql/functions/geo.asciidoc b/docs/reference/sql/functions/geo.asciidoc index fc9a85ce97e4d..bb8680ac183ca 100644 --- a/docs/reference/sql/functions/geo.asciidoc +++ b/docs/reference/sql/functions/geo.asciidoc @@ -71,7 +71,7 @@ Returns the geometry from WKT representation. 
["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs/geo.csv-spec[aswkt] +include-tagged::{sql-specs}/docs/geo.csv-spec[wkttosql] -------------------------------------------------- ==== Geometry Properties diff --git a/docs/reference/sql/language/syntax/lexic/index.asciidoc b/docs/reference/sql/language/syntax/lexic/index.asciidoc index a668ee724e56f..9b2f78c35cd90 100644 --- a/docs/reference/sql/language/syntax/lexic/index.asciidoc +++ b/docs/reference/sql/language/syntax/lexic/index.asciidoc @@ -121,6 +121,9 @@ SELECT "first_name" <1> <1> Double quotes `"` used for column and table identifiers <2> Single quotes `'` used for a string literal +NOTE:: to escape single or double quotes, one needs to use that specific quote one more time. For example, the literal `John's` can be escaped like +`SELECT 'John''s' AS name`. The same goes for double quotes escaping - `SELECT 123 AS "test""number"` will display as a result a column with the name `test"number`. + [[sql-syntax-special-chars]] ==== Special characters diff --git a/docs/reference/upgrade/cluster_restart.asciidoc b/docs/reference/upgrade/cluster_restart.asciidoc index 52b14d642b620..d5d78bc04c912 100644 --- a/docs/reference/upgrade/cluster_restart.asciidoc +++ b/docs/reference/upgrade/cluster_restart.asciidoc @@ -20,7 +20,7 @@ include::disable-shard-alloc.asciidoc[] . *Stop indexing and perform a synced flush.* + -- -Performing a <> speeds up shard +Performing a <> speeds up shard recovery. include::synced-flush.asciidoc[] diff --git a/docs/reference/upgrade/rolling_upgrade.asciidoc b/docs/reference/upgrade/rolling_upgrade.asciidoc index 7ad4a0bb904c1..5ee75dd62b896 100644 --- a/docs/reference/upgrade/rolling_upgrade.asciidoc +++ b/docs/reference/upgrade/rolling_upgrade.asciidoc @@ -30,7 +30,7 @@ include::disable-shard-alloc.asciidoc[] -- While you can continue indexing during the upgrade, shard recovery is much faster if you temporarily stop non-essential indexing and perform a -<>. +<>. include::synced-flush.asciidoc[] @@ -133,7 +133,7 @@ As soon as another node is upgraded, the replicas can be assigned and the status will change to `green`. ==================================================== -Shards that were not <> might take longer to +Shards that were not <> might take longer to recover. You can monitor the recovery status of individual shards by submitting a <> request: diff --git a/docs/reference/vectors/vector-functions.asciidoc b/docs/reference/vectors/vector-functions.asciidoc new file mode 100644 index 0000000000000..d08af2d03bfab --- /dev/null +++ b/docs/reference/vectors/vector-functions.asciidoc @@ -0,0 +1,279 @@ +[role="xpack"] +[testenv="basic"] +[[vector-functions]] +===== Functions for vector fields + +experimental[] + +These functions are used for +for <> and +<> fields. + +NOTE: During vector functions' calculation, all matched documents are +linearly scanned. Thus, expect the query time grow linearly +with the number of matched documents. For this reason, we recommend +to limit the number of matched documents with a `query` parameter. + +Let's create an index with the following mapping and index a couple +of documents into it. 
+ +[source,js] +-------------------------------------------------- +PUT my_index +{ + "mappings": { + "properties": { + "my_dense_vector": { + "type": "dense_vector", + "dims": 3 + }, + "my_sparse_vector" : { + "type" : "sparse_vector" + } + } + } +} + +PUT my_index/_doc/1 +{ + "my_dense_vector": [0.5, 10, 6], + "my_sparse_vector": {"2": 1.5, "15" : 2, "50": -1.1, "4545": 1.1} +} + +PUT my_index/_doc/2 +{ + "my_dense_vector": [-0.5, 10, 10], + "my_sparse_vector": {"2": 2.5, "10" : 1.3, "55": -2.3, "113": 1.6} +} + +-------------------------------------------------- +// CONSOLE +// TESTSETUP + +For dense_vector fields, `cosineSimilarity` calculates the measure of +cosine similarity between a given query vector and document vectors. + +[source,js] +-------------------------------------------------- +GET my_index/_search +{ + "query": { + "script_score": { + "query": { + "match_all": {} + }, + "script": { + "source": "cosineSimilarity(params.query_vector, doc['my_dense_vector']) + 1.0", <1> + "params": { + "query_vector": [4, 3.4, -0.2] <2> + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +<1> The script adds 1.0 to the cosine similarity to prevent the score from being negative. +<2> To take advantage of the script optimizations, provide a query vector as a script parameter. + +NOTE: If a document's dense vector field has a number of dimensions +different from the query's vector, an error will be thrown. + +Similarly, for sparse_vector fields, `cosineSimilaritySparse` calculates cosine similarity +between a given query vector and document vectors. + +[source,js] +-------------------------------------------------- +GET my_index/_search +{ + "query": { + "script_score": { + "query": { + "match_all": {} + }, + "script": { + "source": "cosineSimilaritySparse(params.query_vector, doc['my_sparse_vector']) + 1.0", + "params": { + "query_vector": {"2": 0.5, "10" : 111.3, "50": -1.3, "113": 14.8, "4545": 156.0} + } + } + } + } +} +-------------------------------------------------- +// CONSOLE + +For dense_vector fields, `dotProduct` calculates the measure of +dot product between a given query vector and document vectors. + +[source,js] +-------------------------------------------------- +GET my_index/_search +{ + "query": { + "script_score": { + "query": { + "match_all": {} + }, + "script": { + "source": """ + double value = dotProduct(params.query_vector, doc['my_dense_vector']); + return sigmoid(1, Math.E, -value); <1> + """, + "params": { + "query_vector": [4, 3.4, -0.2] + } + } + } + } +} +-------------------------------------------------- +// CONSOLE + +<1> Using the standard sigmoid function prevents scores from being negative. + +Similarly, for sparse_vector fields, `dotProductSparse` calculates dot product +between a given query vector and document vectors. + +[source,js] +-------------------------------------------------- +GET my_index/_search +{ + "query": { + "script_score": { + "query": { + "match_all": {} + }, + "script": { + "source": """ + double value = dotProductSparse(params.query_vector, doc['my_sparse_vector']); + return sigmoid(1, Math.E, -value); + """, + "params": { + "query_vector": {"2": 0.5, "10" : 111.3, "50": -1.3, "113": 14.8, "4545": 156.0} + } + } + } + } +} +-------------------------------------------------- +// CONSOLE + +For dense_vector fields, `l1norm` calculates L^1^ distance +(Manhattan distance) between a given query vector and +document vectors. 
+ +[source,js] +-------------------------------------------------- +GET my_index/_search +{ + "query": { + "script_score": { + "query": { + "match_all": {} + }, + "script": { + "source": "1 / (1 + l1norm(params.queryVector, doc['my_dense_vector']))", <1> + "params": { + "queryVector": [4, 3.4, -0.2] + } + } + } + } +} +-------------------------------------------------- +// CONSOLE + +<1> Unlike `cosineSimilarity` that represent similarity, `l1norm` and +`l2norm` shown below represent distances or differences. This means, that +the more similar the vectors are, the lower the scores will be that are +produced by the `l1norm` and `l2norm` functions. +Thus, as we need more similar vectors to score higher, +we reversed the output from `l1norm` and `l2norm`. Also, to avoid +division by 0 when a document vector matches the query exactly, +we added `1` in the denominator. + +For sparse_vector fields, `l1normSparse` calculates L^1^ distance +between a given query vector and document vectors. + +[source,js] +-------------------------------------------------- +GET my_index/_search +{ + "query": { + "script_score": { + "query": { + "match_all": {} + }, + "script": { + "source": "1 / (1 + l1normSparse(params.queryVector, doc['my_sparse_vector']))", + "params": { + "queryVector": {"2": 0.5, "10" : 111.3, "50": -1.3, "113": 14.8, "4545": 156.0} + } + } + } + } +} +-------------------------------------------------- +// CONSOLE + +For dense_vector fields, `l2norm` calculates L^2^ distance +(Euclidean distance) between a given query vector and +document vectors. + +[source,js] +-------------------------------------------------- +GET my_index/_search +{ + "query": { + "script_score": { + "query": { + "match_all": {} + }, + "script": { + "source": "1 / (1 + l2norm(params.queryVector, doc['my_dense_vector']))", + "params": { + "queryVector": [4, 3.4, -0.2] + } + } + } + } +} +-------------------------------------------------- +// CONSOLE + +Similarly, for sparse_vector fields, `l2normSparse` calculates L^2^ distance +between a given query vector and document vectors. + +[source,js] +-------------------------------------------------- +GET my_index/_search +{ + "query": { + "script_score": { + "query": { + "match_all": {} + }, + "script": { + "source": "1 / (1 + l2normSparse(params.queryVector, doc['my_sparse_vector']))", + "params": { + "queryVector": {"2": 0.5, "10" : 111.3, "50": -1.3, "113": 14.8, "4545": 156.0} + } + } + } + } +} +-------------------------------------------------- +// CONSOLE + +NOTE: If a document doesn't have a value for a vector field on which +a vector function is executed, an error will be thrown. + +You can check if a document has a value for the field `my_vector` by +`doc['my_vector'].size() == 0`. Your overall script can look like this: + +[source,js] +-------------------------------------------------- +"source": "doc['my_vector'].size() == 0 ? 
0 : cosineSimilarity(params.queryVector, doc['my_vector'])" +-------------------------------------------------- +// NOTCONSOLE diff --git a/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java b/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java index e87df16264e94..ffdabd6d63399 100644 --- a/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java +++ b/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.yaml.ClientYamlDocsTestClient; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ClientYamlTestClient; @@ -41,6 +42,7 @@ import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec; import org.elasticsearch.test.rest.yaml.section.ExecutableSection; +import org.junit.After; import java.io.IOException; import java.util.ArrayList; @@ -97,6 +99,23 @@ protected ClientYamlTestClient initClientYamlTestClient( return new ClientYamlDocsTestClient(restSpec, restClient, hosts, esVersion, masterVersion, this::getClientBuilderWithSniffedHosts); } + @After + public void cleanup() throws Exception { + if (isMachineLearningTest() || isDataFrameTest()) { + ESRestTestCase.waitForPendingTasks(adminClient()); + } + } + + protected boolean isMachineLearningTest() { + String testName = getTestName(); + return testName != null && (testName.contains("/ml/") || testName.contains("\\ml\\")); + } + + protected boolean isDataFrameTest() { + String testName = getTestName(); + return testName != null && (testName.contains("/data-frames/") || testName.contains("\\data-frames\\")); + } + /** * Compares the results of running two analyzers against many random * strings. The goal is to figure out if two anlayzers are "the same" by diff --git a/gradle/build-scan.gradle b/gradle/build-scan.gradle new file mode 100644 index 0000000000000..68d1d1e71ad46 --- /dev/null +++ b/gradle/build-scan.gradle @@ -0,0 +1,43 @@ +import nebula.plugin.info.scm.ScmInfoExtension + +buildScan { + def jenkinsUrl = System.getenv('JENKINS_URL') ? 
new URL(System.getenv('JENKINS_URL')) : null + + // Accept Gradle ToS when project property org.elasticsearch.acceptScanTOS=true or this is an Elastic CI build + if (jenkinsUrl?.host?.endsWith('elastic.co') || Boolean.valueOf(project.findProperty('org.elasticsearch.acceptScanTOS') ?: "false")) { + termsOfServiceUrl = 'https://gradle.com/terms-of-service' + termsOfServiceAgree = 'yes' + } + + // Jenkins-specific build scan metadata + if (jenkinsUrl) { + tag 'CI' + tag System.getenv('JOB_NAME') + link 'Jenkins Build', System.getenv('BUILD_URL') + System.getenv('NODE_LABELS').split(' ').each { + value 'Jenkins Worker Label', it + } + + // Capture changes included in this CI build except for pull request builds + if (System.getenv('GIT_COMMIT') && System.getenv('ROOT_BUILD_CAUSE_GHPRBCAUSE') == null) { + background { + def changes = "git diff --name-only ${System.getenv('GIT_PREVIOUS_COMMIT')}..${System.getenv('GIT_COMMIT')}".execute().text.trim() + value 'Git Changes', changes + } + } + } else { + tag 'LOCAL' + } + + // Add SCM information + def scmInfo = project.extensions.findByType(ScmInfoExtension) + if (scmInfo && scmInfo.change && scmInfo.branch) { + value 'Git Commit ID', scmInfo.change + // Don't tag the branch if we are in a detached head state + if (scmInfo.branch ==~ /[0-9a-f]{5,40}/ == false) { + value 'Git Branch', scmInfo.branch + tag scmInfo.branch + } + link 'Source', "https://github.com/elastic/elasticsearch/commit/${scmInfo.change}" + } +} \ No newline at end of file diff --git a/libs/core/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java b/libs/core/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java index f22087c6e7d8d..1330a365d3dbd 100644 --- a/libs/core/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java +++ b/libs/core/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java @@ -19,6 +19,7 @@ package org.elasticsearch.bootstrap; +import java.math.BigInteger; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -28,12 +29,14 @@ public class JavaVersion implements Comparable { private final List version; + private final String prePart; public List getVersion() { return version; } - private JavaVersion(List version) { + private JavaVersion(List version, String prePart) { + this.prePart = prePart; if (version.size() >= 2 && version.get(0) == 1 && version.get(1) == 8) { // for Java 8 there is ambiguity since both 1.8 and 8 are supported, // so we rewrite the former to the latter @@ -42,23 +45,38 @@ private JavaVersion(List version) { this.version = Collections.unmodifiableList(version); } + /** + * Parses the Java version as it can be retrieved as the value of java.version or + * java.specification.version according to JEP 223. 
+ * + * @param value The version String + */ public static JavaVersion parse(String value) { Objects.requireNonNull(value); + String prePart = null; if (!isValid(value)) { - throw new IllegalArgumentException("value"); + throw new IllegalArgumentException("Java version string [" + value + "] could not be parsed."); } - List version = new ArrayList<>(); - String[] components = value.split("\\."); - for (String component : components) { - version.add(Integer.valueOf(component)); + String[] parts = value.split("-"); + String[] numericComponents; + if (parts.length == 1) { + numericComponents = value.split("\\."); + } else if (parts.length == 2) { + numericComponents = parts[0].split("\\."); + prePart = parts[1]; + } else { + throw new IllegalArgumentException("Java version string [" + value + "] could not be parsed."); } - return new JavaVersion(version); + for (String component : numericComponents) { + version.add(Integer.valueOf(component)); + } + return new JavaVersion(version, prePart); } public static boolean isValid(String value) { - return value.matches("^0*[0-9]+(\\.[0-9]+)*$"); + return value.matches("^0*[0-9]+(\\.[0-9]+)*(-[a-zA-Z0-9]+)?$"); } private static final JavaVersion CURRENT = parse(System.getProperty("java.specification.version")); @@ -78,9 +96,26 @@ public int compareTo(JavaVersion o) { if (s > d) return -1; } + if (prePart != null && o.prePart == null) { + return -1; + } else if (prePart == null && o.prePart != null) { + return 1; + } else if (prePart != null && o.prePart != null) { + return comparePrePart(prePart, o.prePart); + } return 0; } + private int comparePrePart(String prePart, String otherPrePart) { + if (prePart.matches("\\d+")) { + return otherPrePart.matches("\\d+") ? + (new BigInteger(prePart)).compareTo(new BigInteger(otherPrePart)) : -1; + } else { + return otherPrePart.matches("\\d+") ? + 1 : prePart.compareTo(otherPrePart); + } + } + @Override public boolean equals(Object o) { if (o == null || o.getClass() != getClass()) { @@ -96,6 +131,7 @@ public int hashCode() { @Override public String toString() { - return version.stream().map(v -> Integer.toString(v)).collect(Collectors.joining(".")); + final String versionString = version.stream().map(v -> Integer.toString(v)).collect(Collectors.joining(".")); + return prePart != null ? versionString + "-" + prePart : versionString; } } diff --git a/server/src/main/java/org/elasticsearch/common/CheckedRunnable.java b/libs/core/src/main/java/org/elasticsearch/common/CheckedRunnable.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/CheckedRunnable.java rename to libs/core/src/main/java/org/elasticsearch/common/CheckedRunnable.java diff --git a/libs/geo/src/main/java/org/elasticsearch/geo/utils/StandardValidator.java b/libs/geo/src/main/java/org/elasticsearch/geo/utils/StandardValidator.java new file mode 100644 index 0000000000000..37a3a5bac5e74 --- /dev/null +++ b/libs/geo/src/main/java/org/elasticsearch/geo/utils/StandardValidator.java @@ -0,0 +1,128 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.geo.utils; + +import org.elasticsearch.geo.geometry.Circle; +import org.elasticsearch.geo.geometry.Geometry; +import org.elasticsearch.geo.geometry.GeometryCollection; +import org.elasticsearch.geo.geometry.GeometryVisitor; +import org.elasticsearch.geo.geometry.Line; +import org.elasticsearch.geo.geometry.LinearRing; +import org.elasticsearch.geo.geometry.MultiLine; +import org.elasticsearch.geo.geometry.MultiPoint; +import org.elasticsearch.geo.geometry.MultiPolygon; +import org.elasticsearch.geo.geometry.Point; +import org.elasticsearch.geo.geometry.Polygon; +import org.elasticsearch.geo.geometry.Rectangle; + +/** + * Validator that only checks that altitude only shows up if ignoreZValue is set to true. + */ +public class StandardValidator implements GeometryValidator { + + private final boolean ignoreZValue; + + public StandardValidator(boolean ignoreZValue) { + this.ignoreZValue = ignoreZValue; + } + + protected void checkAltitude(double zValue) { + if (ignoreZValue == false && Double.isNaN(zValue) == false) { + throw new IllegalArgumentException("found Z value [" + zValue + "] but [ignore_z_value] " + + "parameter is [" + ignoreZValue + "]"); + } + } + + @Override + public void validate(Geometry geometry) { + if (ignoreZValue == false) { + geometry.visit(new GeometryVisitor() { + + @Override + public Void visit(Circle circle) throws RuntimeException { + checkAltitude(circle.getAlt()); + return null; + } + + @Override + public Void visit(GeometryCollection collection) throws RuntimeException { + for (Geometry g : collection) { + g.visit(this); + } + return null; + } + + @Override + public Void visit(Line line) throws RuntimeException { + for (int i = 0; i < line.length(); i++) { + checkAltitude(line.getAlt(i)); + } + return null; + } + + @Override + public Void visit(LinearRing ring) throws RuntimeException { + for (int i = 0; i < ring.length(); i++) { + checkAltitude(ring.getAlt(i)); + } + return null; + } + + @Override + public Void visit(MultiLine multiLine) throws RuntimeException { + return visit((GeometryCollection) multiLine); + } + + @Override + public Void visit(MultiPoint multiPoint) throws RuntimeException { + return visit((GeometryCollection) multiPoint); + } + + @Override + public Void visit(MultiPolygon multiPolygon) throws RuntimeException { + return visit((GeometryCollection) multiPolygon); + } + + @Override + public Void visit(Point point) throws RuntimeException { + checkAltitude(point.getAlt()); + return null; + } + + @Override + public Void visit(Polygon polygon) throws RuntimeException { + polygon.getPolygon().visit(this); + for (int i = 0; i < polygon.getNumberOfHoles(); i++) { + polygon.getHole(i).visit(this); + } + return null; + } + + @Override + public Void visit(Rectangle rectangle) throws RuntimeException { + checkAltitude(rectangle.getMinAlt()); + checkAltitude(rectangle.getMaxAlt()); + return null; + } + }); + } + } +} + diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/BaseGeometryTestCase.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/BaseGeometryTestCase.java index 
073bff4cb7575..841983da8daa8 100644
--- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/BaseGeometryTestCase.java
+++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/BaseGeometryTestCase.java
@@ -28,10 +28,7 @@
 import java.io.IOException;
 import java.text.ParseException;
-import java.util.ArrayList;
-import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.function.Function;

 abstract class BaseGeometryTestCase<T extends Geometry> extends AbstractWireTestCase<T> {

@@ -131,123 +128,4 @@ public Object visit(Rectangle rectangle) {
         assertEquals("result", result);
     }
-
-    public static double randomLat() {
-        return randomDoubleBetween(-90, 90, true);
-    }
-
-    public static double randomLon() {
-        return randomDoubleBetween(-180, 180, true);
-    }
-
-    public static Circle randomCircle(boolean hasAlt) {
-        if (hasAlt) {
-            return new Circle(randomDoubleBetween(-90, 90, true), randomDoubleBetween(-180, 180, true), randomDouble(),
-                randomDoubleBetween(0, 100, false));
-        } else {
-            return new Circle(randomDoubleBetween(-90, 90, true), randomDoubleBetween(-180, 180, true), randomDoubleBetween(0, 100, false));
-        }
-    }
-
-    public static Line randomLine() {
-        return randomLine(randomBoolean());
-    }
-
-    public static Line randomLine(boolean hasAlts) {
-        int size = randomIntBetween(2, 10);
-        double[] lats = new double[size];
-        double[] lons = new double[size];
-        double[] alts = hasAlts ? new double[size] : null;
-        for (int i = 0; i < size; i++) {
-            lats[i] = randomLat();
-            lons[i] = randomLon();
-            if (hasAlts) {
-                alts[i] = randomDouble();
-            }
-        }
-        if (hasAlts) {
-            return new Line(lats, lons, alts);
-        }
-        return new Line(lats, lons);
-    }
-
-    public static Point randomPoint() {
-        return randomPoint(randomBoolean());
-    }
-
-    public static Point randomPoint(boolean hasAlt) {
-        if (hasAlt) {
-            return new Point(randomLat(), randomLon(), randomDouble());
-        } else {
-            return new Point(randomLat(), randomLon());
-        }
-    }
-
-    public static LinearRing randomLinearRing(boolean hasAlt) {
-        int size = randomIntBetween(3, 10);
-        double[] lats = new double[size + 1];
-        double[] lons = new double[size + 1];
-        double[] alts;
-        if (hasAlt) {
-            alts = new double[size + 1];
-        } else {
-            alts = null;
-        }
-        for (int i = 0; i < size; i++) {
-            lats[i] = randomLat();
-            lons[i] = randomLon();
-            if (hasAlt) {
-                alts[i] = randomDouble();
-            }
-        }
-        lats[size] = lats[0];
-        lons[size] = lons[0];
-        if (hasAlt) {
-            alts[size] = alts[0];
-            return new LinearRing(lats, lons, alts);
-        } else {
-            return new LinearRing(lats, lons);
-        }
-    }
-
-    public static Polygon randomPolygon(boolean hasAlt) {
-        int size = randomIntBetween(0, 10);
-        List<LinearRing> holes = new ArrayList<>();
-        for (int i = 0; i < size; i++) {
-            holes.add(randomLinearRing(hasAlt));
-        }
-        if (holes.size() > 0) {
-            return new Polygon(randomLinearRing(hasAlt), holes);
-        } else {
-            return new Polygon(randomLinearRing(hasAlt));
-        }
-    }
-
-    public static Rectangle randomRectangle() {
-        double lat1 = randomLat();
-        double lat2 = randomLat();
-        double minLon = randomLon();
-        double maxLon = randomLon();
-        return new Rectangle(Math.min(lat1, lat2), Math.max(lat1, lat2), minLon, maxLon);
-    }
-
-    public static GeometryCollection<Geometry> randomGeometryCollection(boolean hasAlt) {
-        return randomGeometryCollection(0, hasAlt);
-    }
-
-    private static GeometryCollection<Geometry> randomGeometryCollection(int level, boolean hasAlt) {
-        int size = randomIntBetween(1, 10);
-        List<Geometry> shapes = new ArrayList<>();
-        for (int i = 0; i < size; i++) {
-            @SuppressWarnings("unchecked") Function<Boolean, Geometry> geometry = randomFrom(
-                BaseGeometryTestCase::randomCircle,
-                BaseGeometryTestCase::randomLine,
-                BaseGeometryTestCase::randomPoint,
-                BaseGeometryTestCase::randomPolygon,
-                hasAlt ? BaseGeometryTestCase::randomPoint : (b) -> randomRectangle(),
-                level < 3 ? (b) -> randomGeometryCollection(level + 1, b) : BaseGeometryTestCase::randomPoint // don't build too deep
-            );
-            shapes.add(geometry.apply(hasAlt));
-        }
-        return new GeometryCollection<>(shapes);
-    }
 }
diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/CircleTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/CircleTests.java
index e8912a39fb435..14107494928bd 100644
--- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/CircleTests.java
+++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/CircleTests.java
@@ -21,6 +21,7 @@

 import org.elasticsearch.geo.utils.GeographyValidator;
 import org.elasticsearch.geo.utils.GeometryValidator;
+import org.elasticsearch.geo.utils.StandardValidator;
 import org.elasticsearch.geo.utils.WellKnownText;

 import java.io.IOException;
@@ -59,5 +60,10 @@ public void testInitValidation() {
         ex = expectThrows(IllegalArgumentException.class, () -> validator.validate(new Circle(10, 200, 1)));
         assertEquals("invalid longitude 200.0; must be between -180.0 and 180.0", ex.getMessage());
+
+        ex = expectThrows(IllegalArgumentException.class, () -> new StandardValidator(false).validate(new Circle(10, 200, 1, 20)));
+        assertEquals("found Z value [1.0] but [ignore_z_value] parameter is [false]", ex.getMessage());
+
+        new StandardValidator(true).validate(new Circle(10, 200, 1, 20));
     }
 }
diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/GeometryCollectionTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/GeometryCollectionTests.java
index c78c47dfbcd96..06f778aedafb1 100644
--- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/GeometryCollectionTests.java
+++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/GeometryCollectionTests.java
@@ -19,7 +19,9 @@

 package org.elasticsearch.geo.geometry;

+import org.elasticsearch.geo.GeometryTestUtils;
 import org.elasticsearch.geo.utils.GeographyValidator;
+import org.elasticsearch.geo.utils.StandardValidator;
 import org.elasticsearch.geo.utils.WellKnownText;

 import java.io.IOException;
@@ -30,7 +32,7 @@ public class GeometryCollectionTests extends BaseGeometryTestCase<GeometryCollection<Geometry>> {

     @Override
     protected GeometryCollection<Geometry> createTestInstance(boolean hasAlt) {
-        return randomGeometryCollection(hasAlt);
+        return GeometryTestUtils.randomGeometryCollection(hasAlt);
     }

@@ -58,5 +60,11 @@ public void testInitValidation() {
         ex = expectThrows(IllegalArgumentException.class, () -> new GeometryCollection<>(
             Arrays.asList(new Point(10, 20), new Point(10, 20, 30))));
         assertEquals("all elements of the collection should have the same number of dimension", ex.getMessage());
+
+        ex = expectThrows(IllegalArgumentException.class, () -> new StandardValidator(false).validate(
+            new GeometryCollection<>(Collections.singletonList(new Point(10, 20, 30)))));
+        assertEquals("found Z value [30.0] but [ignore_z_value] parameter is [false]", ex.getMessage());
+
+        new StandardValidator(true).validate(new GeometryCollection<>(Collections.singletonList(new Point(10, 20, 30))));
     }
 }
diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/LineTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/LineTests.java
index b9f8cb37f5422..ff18dcf927011 100644
--- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/LineTests.java
+++ 
b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/LineTests.java @@ -19,8 +19,10 @@ package org.elasticsearch.geo.geometry; +import org.elasticsearch.geo.GeometryTestUtils; import org.elasticsearch.geo.utils.GeographyValidator; import org.elasticsearch.geo.utils.GeometryValidator; +import org.elasticsearch.geo.utils.StandardValidator; import org.elasticsearch.geo.utils.WellKnownText; import java.io.IOException; @@ -29,7 +31,7 @@ public class LineTests extends BaseGeometryTestCase { @Override protected Line createTestInstance(boolean hasAlt) { - return randomLine(hasAlt); + return GeometryTestUtils.randomLine(hasAlt); } public void testBasicSerialization() throws IOException, ParseException { @@ -59,6 +61,12 @@ public void testInitValidation() { ex = expectThrows(IllegalArgumentException.class, () -> validator.validate(new Line(new double[]{1, 100, 3, 1}, new double[]{3, 4, 5, 3}))); assertEquals("invalid latitude 100.0; must be between -90.0 and 90.0", ex.getMessage()); + + ex = expectThrows(IllegalArgumentException.class, () -> new StandardValidator(false).validate( + new Line(new double[]{1, 2}, new double[]{3, 4}, new double[]{6, 5}))); + assertEquals("found Z value [6.0] but [ignore_z_value] parameter is [false]", ex.getMessage()); + + new StandardValidator(true).validate(new Line(new double[]{1, 2}, new double[]{3, 4}, new double[]{6, 5})); } public void testWKTValidation() { diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/LinearRingTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/LinearRingTests.java index 07e9e866233e7..34ebb8e25d596 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/LinearRingTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/LinearRingTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.geo.utils.GeographyValidator; import org.elasticsearch.geo.utils.GeometryValidator; +import org.elasticsearch.geo.utils.StandardValidator; import org.elasticsearch.geo.utils.WellKnownText; import org.elasticsearch.test.ESTestCase; @@ -58,6 +59,12 @@ public void testInitValidation() { ex = expectThrows(IllegalArgumentException.class, () -> validator.validate(new LinearRing(new double[]{1, 100, 3, 1}, new double[]{3, 4, 5, 3}))); assertEquals("invalid latitude 100.0; must be between -90.0 and 90.0", ex.getMessage()); + + ex = expectThrows(IllegalArgumentException.class, () -> new StandardValidator(false).validate( + new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 5, 3}, new double[]{1, 1, 1, 1}))); + assertEquals("found Z value [1.0] but [ignore_z_value] parameter is [false]", ex.getMessage()); + + new StandardValidator(true).validate(new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 5, 3}, new double[]{1, 1, 1, 1})); } public void testVisitor() { diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiLineTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiLineTests.java index 9ed782e65cc06..2b230dbbe52ef 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiLineTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiLineTests.java @@ -19,7 +19,9 @@ package org.elasticsearch.geo.geometry; +import org.elasticsearch.geo.GeometryTestUtils; import org.elasticsearch.geo.utils.GeographyValidator; +import org.elasticsearch.geo.utils.StandardValidator; import org.elasticsearch.geo.utils.WellKnownText; import java.io.IOException; @@ -35,7 +37,7 @@ protected MultiLine createTestInstance(boolean hasAlt) { int size = 
randomIntBetween(1, 10); List arr = new ArrayList(); for (int i = 0; i < size; i++) { - arr.add(randomLine(hasAlt)); + arr.add(GeometryTestUtils.randomLine(hasAlt)); } return new MultiLine(arr); } @@ -50,4 +52,13 @@ public void testBasicSerialization() throws IOException, ParseException { assertEquals("multilinestring EMPTY", wkt.toWKT(MultiLine.EMPTY)); assertEquals(MultiLine.EMPTY, wkt.fromWKT("multilinestring EMPTY)")); } + + public void testValidation() { + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> new StandardValidator(false).validate( + new MultiLine(Collections.singletonList(new Line(new double[]{1, 2}, new double[]{3, 4}, new double[]{6, 5}))))); + assertEquals("found Z value [6.0] but [ignore_z_value] parameter is [false]", ex.getMessage()); + + new StandardValidator(true).validate( + new MultiLine(Collections.singletonList(new Line(new double[]{1, 2}, new double[]{3, 4}, new double[]{6, 5})))); + } } diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiPointTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiPointTests.java index c170adf9c9411..ae0e05792897f 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiPointTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiPointTests.java @@ -19,7 +19,9 @@ package org.elasticsearch.geo.geometry; +import org.elasticsearch.geo.GeometryTestUtils; import org.elasticsearch.geo.utils.GeographyValidator; +import org.elasticsearch.geo.utils.StandardValidator; import org.elasticsearch.geo.utils.WellKnownText; import java.io.IOException; @@ -36,7 +38,7 @@ protected MultiPoint createTestInstance(boolean hasAlt) { int size = randomIntBetween(1, 10); List arr = new ArrayList<>(); for (int i = 0; i < size; i++) { - arr.add(randomPoint(hasAlt)); + arr.add(GeometryTestUtils.randomPoint(hasAlt)); } return new MultiPoint(arr); } @@ -61,4 +63,12 @@ public void testBasicSerialization() throws IOException, ParseException { assertEquals("multipoint EMPTY", wkt.toWKT(MultiPoint.EMPTY)); assertEquals(MultiPoint.EMPTY, wkt.fromWKT("multipoint EMPTY)")); } + + public void testValidation() { + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> new StandardValidator(false).validate( + new MultiPoint(Collections.singletonList(new Point(1, 2 ,3))))); + assertEquals("found Z value [3.0] but [ignore_z_value] parameter is [false]", ex.getMessage()); + + new StandardValidator(true).validate(new MultiPoint(Collections.singletonList(new Point(1, 2 ,3)))); + } } diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiPolygonTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiPolygonTests.java index 9918dfa546c82..45e05c93d135a 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiPolygonTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiPolygonTests.java @@ -19,7 +19,9 @@ package org.elasticsearch.geo.geometry; +import org.elasticsearch.geo.GeometryTestUtils; import org.elasticsearch.geo.utils.GeographyValidator; +import org.elasticsearch.geo.utils.StandardValidator; import org.elasticsearch.geo.utils.WellKnownText; import java.io.IOException; @@ -35,7 +37,7 @@ protected MultiPolygon createTestInstance(boolean hasAlt) { int size = randomIntBetween(1, 10); List arr = new ArrayList<>(); for (int i = 0; i < size; i++) { - arr.add(randomPolygon(hasAlt)); + arr.add(GeometryTestUtils.randomPolygon(hasAlt)); } return new MultiPolygon(arr); } @@ -52,4 
+54,16 @@ public void testBasicSerialization() throws IOException, ParseException { assertEquals("multipolygon EMPTY", wkt.toWKT(MultiPolygon.EMPTY)); assertEquals(MultiPolygon.EMPTY, wkt.fromWKT("multipolygon EMPTY)")); } + + public void testValidation() { + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> new StandardValidator(false).validate( + new MultiPolygon(Collections.singletonList( + new Polygon(new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 5, 3}, new double[]{1, 2, 3, 1})) + )))); + assertEquals("found Z value [1.0] but [ignore_z_value] parameter is [false]", ex.getMessage()); + + new StandardValidator(true).validate( + new MultiPolygon(Collections.singletonList( + new Polygon(new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 5, 3}, new double[]{1, 2, 3, 1}))))); + } } diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/PointTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/PointTests.java index 82e8fc40e75e9..1adb3705f1374 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/PointTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/PointTests.java @@ -19,8 +19,10 @@ package org.elasticsearch.geo.geometry; +import org.elasticsearch.geo.GeometryTestUtils; import org.elasticsearch.geo.utils.GeographyValidator; import org.elasticsearch.geo.utils.GeometryValidator; +import org.elasticsearch.geo.utils.StandardValidator; import org.elasticsearch.geo.utils.WellKnownText; import java.io.IOException; @@ -29,7 +31,7 @@ public class PointTests extends BaseGeometryTestCase { @Override protected Point createTestInstance(boolean hasAlt) { - return randomPoint(hasAlt); + return GeometryTestUtils.randomPoint(hasAlt); } public void testBasicSerialization() throws IOException, ParseException { @@ -51,6 +53,11 @@ public void testInitValidation() { ex = expectThrows(IllegalArgumentException.class, () -> validator.validate(new Point(10, 500))); assertEquals("invalid longitude 500.0; must be between -180.0 and 180.0", ex.getMessage()); + + ex = expectThrows(IllegalArgumentException.class, () -> new StandardValidator(false).validate(new Point(1, 2, 3))); + assertEquals("found Z value [3.0] but [ignore_z_value] parameter is [false]", ex.getMessage()); + + new StandardValidator(true).validate(new Point(1, 2, 3)); } public void testWKTValidation() { diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/PolygonTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/PolygonTests.java index adbe1f38cdcc0..c4a5ae5dd5abd 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/PolygonTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/PolygonTests.java @@ -19,7 +19,9 @@ package org.elasticsearch.geo.geometry; +import org.elasticsearch.geo.GeometryTestUtils; import org.elasticsearch.geo.utils.GeographyValidator; +import org.elasticsearch.geo.utils.StandardValidator; import org.elasticsearch.geo.utils.WellKnownText; import java.io.IOException; @@ -29,7 +31,7 @@ public class PolygonTests extends BaseGeometryTestCase { @Override protected Polygon createTestInstance(boolean hasAlt) { - return randomPolygon(hasAlt); + return GeometryTestUtils.randomPolygon(hasAlt); } public void testBasicSerialization() throws IOException, ParseException { @@ -70,6 +72,13 @@ public void testInitValidation() { () -> new Polygon(new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 5, 3}, new double[]{5, 4, 3, 5}), Collections.singletonList(new LinearRing(new 
double[]{1, 2, 3, 1}, new double[]{3, 4, 5, 3})))); assertEquals("holes must have the same number of dimensions as the polygon", ex.getMessage()); + + ex = expectThrows(IllegalArgumentException.class, () -> new StandardValidator(false).validate( + new Polygon(new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 5, 3}, new double[]{1, 2, 3, 1})))); + assertEquals("found Z value [1.0] but [ignore_z_value] parameter is [false]", ex.getMessage()); + + new StandardValidator(true).validate( + new Polygon(new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 5, 3}, new double[]{1, 2, 3, 1}))); } public void testWKTValidation() { diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/RectangleTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/RectangleTests.java index 8bd1494eb34a9..a5f3e4d468610 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/RectangleTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/RectangleTests.java @@ -19,8 +19,10 @@ package org.elasticsearch.geo.geometry; +import org.elasticsearch.geo.GeometryTestUtils; import org.elasticsearch.geo.utils.GeographyValidator; import org.elasticsearch.geo.utils.GeometryValidator; +import org.elasticsearch.geo.utils.StandardValidator; import org.elasticsearch.geo.utils.WellKnownText; import java.io.IOException; @@ -30,7 +32,7 @@ public class RectangleTests extends BaseGeometryTestCase { @Override protected Rectangle createTestInstance(boolean hasAlt) { assumeFalse("3rd dimension is not supported yet", hasAlt); - return randomRectangle(); + return GeometryTestUtils.randomRectangle(); } public void testBasicSerialization() throws IOException, ParseException { @@ -59,5 +61,11 @@ public void testInitValidation() { ex = expectThrows(IllegalArgumentException.class, () -> validator.validate(new Rectangle(1, 2, 2, 3, 5, Double.NaN))); assertEquals("only one altitude value is specified", ex.getMessage()); + + ex = expectThrows(IllegalArgumentException.class, () -> new StandardValidator(false).validate( + new Rectangle(30, 40, 50, 10, 20, 60))); + assertEquals("found Z value [20.0] but [ignore_z_value] parameter is [false]", ex.getMessage()); + + new StandardValidator(true).validate(new Rectangle(30, 40, 50, 10, 20, 60)); } } diff --git a/libs/nio/src/main/java/org/elasticsearch/nio/BytesWriteHandler.java b/libs/nio/src/main/java/org/elasticsearch/nio/BytesWriteHandler.java index 48d83d2169248..8e590c830b953 100644 --- a/libs/nio/src/main/java/org/elasticsearch/nio/BytesWriteHandler.java +++ b/libs/nio/src/main/java/org/elasticsearch/nio/BytesWriteHandler.java @@ -35,7 +35,7 @@ public WriteOperation createWriteOperation(SocketChannelContext context, Object } @Override - public void channelRegistered() {} + public void channelActive() {} @Override public List writeToBytes(WriteOperation writeOperation) { diff --git a/libs/nio/src/main/java/org/elasticsearch/nio/ChannelContext.java b/libs/nio/src/main/java/org/elasticsearch/nio/ChannelContext.java index a2663385daa0f..a030f68fe8bf8 100644 --- a/libs/nio/src/main/java/org/elasticsearch/nio/ChannelContext.java +++ b/libs/nio/src/main/java/org/elasticsearch/nio/ChannelContext.java @@ -50,17 +50,19 @@ protected void register() throws IOException { doSelectorRegister(); } + protected void channelActive() throws IOException {} + // Package private for testing void doSelectorRegister() throws IOException { - setSelectionKey(rawChannel.register(getSelector().rawSelector(), 0)); + 
setSelectionKey(rawChannel.register(getSelector().rawSelector(), 0));
+        setSelectionKey(rawChannel.register(getSelector().rawSelector(), 0, this));
     }

-    SelectionKey getSelectionKey() {
+    protected SelectionKey getSelectionKey() {
         return selectionKey;
     }

-    // Protected for tests
-    protected void setSelectionKey(SelectionKey selectionKey) {
+    // public for tests
+    public void setSelectionKey(SelectionKey selectionKey) {
         this.selectionKey = selectionKey;
     }
diff --git a/libs/nio/src/main/java/org/elasticsearch/nio/ChannelFactory.java b/libs/nio/src/main/java/org/elasticsearch/nio/ChannelFactory.java
index 423168b35d09b..b886f9b68aac0 100644
--- a/libs/nio/src/main/java/org/elasticsearch/nio/ChannelFactory.java
+++ b/libs/nio/src/main/java/org/elasticsearch/nio/ChannelFactory.java
@@ -19,10 +19,13 @@
 package org.elasticsearch.nio;

+import org.elasticsearch.common.CheckedRunnable;
+
 import java.io.Closeable;
 import java.io.IOException;
 import java.io.UncheckedIOException;
 import java.net.InetSocketAddress;
+import java.net.SocketException;
 import java.nio.channels.ServerSocketChannel;
 import java.nio.channels.SocketChannel;
 import java.security.AccessController;
@@ -206,17 +209,30 @@ ServerSocketChannel openNioServerSocketChannel(InetSocketAddress address) throws
         return serverSocketChannel;
     }

+    private static final boolean MAC_OS_X = System.getProperty("os.name").startsWith("Mac OS X");
+
+    private static void setSocketOption(CheckedRunnable<SocketException> runnable) throws SocketException {
+        try {
+            runnable.run();
+        } catch (SocketException e) {
+            if (MAC_OS_X == false) {
+                // ignore on Mac, see https://github.com/elastic/elasticsearch/issues/41071
+                throw e;
+            }
+        }
+    }
+
     private void configureSocketChannel(SocketChannel channel) throws IOException {
         channel.configureBlocking(false);
         java.net.Socket socket = channel.socket();
-        socket.setTcpNoDelay(tcpNoDelay);
-        socket.setKeepAlive(tcpKeepAlive);
-        socket.setReuseAddress(tcpReusedAddress);
+        setSocketOption(() -> socket.setTcpNoDelay(tcpNoDelay));
+        setSocketOption(() -> socket.setKeepAlive(tcpKeepAlive));
+        setSocketOption(() -> socket.setReuseAddress(tcpReusedAddress));
         if (tcpSendBufferSize > 0) {
-            socket.setSendBufferSize(tcpSendBufferSize);
+            setSocketOption(() -> socket.setSendBufferSize(tcpSendBufferSize));
         }
         if (tcpReceiveBufferSize > 0) {
-            socket.setSendBufferSize(tcpReceiveBufferSize);
+            setSocketOption(() -> socket.setReceiveBufferSize(tcpReceiveBufferSize));
         }
     }
diff --git a/libs/nio/src/main/java/org/elasticsearch/nio/DelegatingHandler.java b/libs/nio/src/main/java/org/elasticsearch/nio/DelegatingHandler.java
index d928b0bf9349d..e320f79f999e3 100644
--- a/libs/nio/src/main/java/org/elasticsearch/nio/DelegatingHandler.java
+++ b/libs/nio/src/main/java/org/elasticsearch/nio/DelegatingHandler.java
@@ -32,8 +32,8 @@ public DelegatingHandler(NioChannelHandler delegate) {
     }

     @Override
-    public void channelRegistered() {
-        this.delegate.channelRegistered();
+    public void channelActive() {
+        this.delegate.channelActive();
     }

     @Override
diff --git a/libs/nio/src/main/java/org/elasticsearch/nio/EventHandler.java b/libs/nio/src/main/java/org/elasticsearch/nio/EventHandler.java
index 4a6fed2bb3495..302056578e902 100644
--- a/libs/nio/src/main/java/org/elasticsearch/nio/EventHandler.java
+++ b/libs/nio/src/main/java/org/elasticsearch/nio/EventHandler.java
@@ -63,8 +63,28 @@ protected void acceptException(ServerChannelContext context, Exception exception
      */
     protected void handleRegistration(ChannelContext<?> context) throws IOException {
         context.register();
-        SelectionKey selectionKey = context.getSelectionKey();
-        selectionKey.attach(context);
+        assert context.getSelectionKey() != null : "SelectionKey should not be null after registration";
+        assert context.getSelectionKey().attachment() != null : "Attachment should not be null after registration";
+    }
+
+    /**
+     * This method is called when an attempt to register a channel throws an exception.
+     *
+     * @param context that was registered
+     * @param exception that occurred
+     */
+    protected void registrationException(ChannelContext<?> context, Exception exception) {
+        context.handleException(exception);
+    }
+
+    /**
+     * This method is called after a NioChannel is active with the selector. It should only be called once
+     * per channel.
+     *
+     * @param context that was marked active
+     */
+    protected void handleActive(ChannelContext<?> context) throws IOException {
+        context.channelActive();
         if (context instanceof SocketChannelContext) {
             if (((SocketChannelContext) context).readyForFlush()) {
                 SelectionKeyUtils.setConnectReadAndWriteInterested(context.getSelectionKey());
@@ -78,12 +98,12 @@ protected void handleRegistration(ChannelContext<?> context) throws IOException
     }

     /**
-     * This method is called when an attempt to register a channel throws an exception.
+     * This method is called when setting a channel to active throws an exception.
      *
-     * @param context that was registered
+     * @param context that was marked active
      * @param exception that occurred
      */
-    protected void registrationException(ChannelContext<?> context, Exception exception) {
+    protected void activeException(ChannelContext<?> context, Exception exception) {
         context.handleException(exception);
     }
diff --git a/libs/nio/src/main/java/org/elasticsearch/nio/NioChannelHandler.java b/libs/nio/src/main/java/org/elasticsearch/nio/NioChannelHandler.java
index 61bda9a450768..2d91e769368d2 100644
--- a/libs/nio/src/main/java/org/elasticsearch/nio/NioChannelHandler.java
+++ b/libs/nio/src/main/java/org/elasticsearch/nio/NioChannelHandler.java
@@ -29,9 +29,9 @@ public interface NioChannelHandler {

     /**
-     * This method is called when the channel is registered with its selector.
+     * This method is called when the channel is active for use.
      */
-    void channelRegistered();
+    void channelActive();

     /**
      * This method is called when a message is queued with a channel. It can be called from any thread.
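The hunks above split the old single-step registration into two phases: handleRegistration() now only attaches the context to its SelectionKey, while the new handleActive() sets the initial interest ops and fires channelActive() exactly once per channel, after registration has succeeded. A minimal sketch of a handler that hooks the new callback; it builds only on the DelegatingHandler and NioChannelHandler types changed above, and the LoggingActiveHandler class itself is hypothetical, not part of this change:

    import org.elasticsearch.nio.DelegatingHandler;
    import org.elasticsearch.nio.NioChannelHandler;

    // Sketch: run one-time setup when the channel becomes usable, by
    // overriding the new channelActive() callback instead of the old
    // channelRegistered() one.
    public class LoggingActiveHandler extends DelegatingHandler {

        public LoggingActiveHandler(NioChannelHandler delegate) {
            super(delegate);
        }

        @Override
        public void channelActive() {
            // Invoked once, after the selector has registered the channel
            // and set its initial interest ops.
            System.out.println("channel is now active");
            super.channelActive();
        }
    }

Under the old API the equivalent hook fired during registration, before interest ops were configured; the rename makes that ordering guarantee explicit.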
diff --git a/libs/nio/src/main/java/org/elasticsearch/nio/NioSelector.java b/libs/nio/src/main/java/org/elasticsearch/nio/NioSelector.java
index fff07f625ff13..9034f0c643389 100644
--- a/libs/nio/src/main/java/org/elasticsearch/nio/NioSelector.java
+++ b/libs/nio/src/main/java/org/elasticsearch/nio/NioSelector.java
@@ -28,6 +28,7 @@
 import java.nio.channels.SelectionKey;
 import java.nio.channels.Selector;
 import java.util.Iterator;
+import java.util.Objects;
 import java.util.Set;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ConcurrentLinkedQueue;
@@ -196,7 +197,8 @@ void cleanupAndCloseChannels() {
         cleanupPendingWrites();
         channelsToClose.addAll(channelsToRegister);
         channelsToRegister.clear();
-        channelsToClose.addAll(selector.keys().stream().map(sk -> (ChannelContext<?>) sk.attachment()).collect(Collectors.toList()));
+        channelsToClose.addAll(selector.keys().stream()
+            .map(sk -> (ChannelContext<?>) sk.attachment()).filter(Objects::nonNull).collect(Collectors.toList()));
         closePendingChannels();
     }
@@ -338,22 +340,32 @@ public void scheduleForRegistration(NioChannel channel) {
     private void writeToChannel(WriteOperation writeOperation) {
         assertOnSelectorThread();
         SocketChannelContext context = writeOperation.getChannel();
-        // If the channel does not currently have anything that is ready to flush, we should flush after
-        // the write operation is queued.
-        boolean shouldFlushAfterQueuing = context.readyForFlush() == false;
-        try {
-            SelectionKeyUtils.setWriteInterested(context.getSelectionKey());
-            context.queueWriteOperation(writeOperation);
-        } catch (Exception e) {
-            shouldFlushAfterQueuing = false;
-            executeFailedListener(writeOperation.getListener(), e);
-        }
-        if (shouldFlushAfterQueuing) {
-            if (context.selectorShouldClose() == false) {
-                handleWrite(context);
+        if (context.isOpen() == false) {
+            executeFailedListener(writeOperation.getListener(), new ClosedChannelException());
+        } else if (context.getSelectionKey() == null) {
+            // This should very rarely happen. The only times a channel is exposed outside the event loop,
+            // but might not be registered, is through the exception handler and channel accepted callbacks.
+            executeFailedListener(writeOperation.getListener(), new IllegalStateException("Channel not registered"));
+        } else {
+            // If the channel does not currently have anything that is ready to flush, we should flush after
+            // the write operation is queued.
+            boolean shouldFlushAfterQueuing = context.readyForFlush() == false;
+            try {
+                context.queueWriteOperation(writeOperation);
+            } catch (Exception e) {
+                shouldFlushAfterQueuing = false;
+                executeFailedListener(writeOperation.getListener(), e);
+            }
+
+            if (shouldFlushAfterQueuing) {
+                // We only attempt the write if the connect process is complete and the context is not
+                // signalling that it should be closed. 
+ if (context.isConnectComplete() && context.selectorShouldClose() == false) { + handleWrite(context); + } + eventHandler.postHandling(context); } - eventHandler.postHandling(context); } } @@ -434,14 +446,25 @@ private void registerChannel(ChannelContext newChannel) { try { if (newChannel.isOpen()) { eventHandler.handleRegistration(newChannel); + channelActive(newChannel); if (newChannel instanceof SocketChannelContext) { attemptConnect((SocketChannelContext) newChannel, false); } } else { eventHandler.registrationException(newChannel, new ClosedChannelException()); + closeChannel(newChannel); } } catch (Exception e) { eventHandler.registrationException(newChannel, e); + closeChannel(newChannel); + } + } + + private void channelActive(ChannelContext newChannel) { + try { + eventHandler.handleActive(newChannel); + } catch (IOException e) { + eventHandler.activeException(newChannel, e); } } @@ -463,11 +486,7 @@ private void closeChannel(final ChannelContext channelContext) { private void handleQueuedWrites() { WriteOperation writeOperation; while ((writeOperation = queuedWrites.poll()) != null) { - if (writeOperation.getChannel().isOpen()) { - writeToChannel(writeOperation); - } else { - executeFailedListener(writeOperation.getListener(), new ClosedChannelException()); - } + writeToChannel(writeOperation); } } diff --git a/libs/nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java b/libs/nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java index f77ccb17aef39..bc93466b58a63 100644 --- a/libs/nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java +++ b/libs/nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java @@ -156,9 +156,8 @@ protected FlushOperation getPendingFlush() { } @Override - protected void register() throws IOException { - super.register(); - readWriteHandler.channelRegistered(); + protected void channelActive() throws IOException { + readWriteHandler.channelActive(); } @Override diff --git a/libs/nio/src/test/java/org/elasticsearch/nio/EventHandlerTests.java b/libs/nio/src/test/java/org/elasticsearch/nio/EventHandlerTests.java index 578890b152ff1..726d87317ffff 100644 --- a/libs/nio/src/test/java/org/elasticsearch/nio/EventHandlerTests.java +++ b/libs/nio/src/test/java/org/elasticsearch/nio/EventHandlerTests.java @@ -81,32 +81,25 @@ public void setUpHandler() throws IOException { } public void testRegisterCallsContext() throws IOException { - NioSocketChannel channel = mock(NioSocketChannel.class); - SocketChannelContext channelContext = mock(SocketChannelContext.class); - when(channel.getContext()).thenReturn(channelContext); - when(channelContext.getSelectionKey()).thenReturn(new TestSelectionKey(0)); + ChannelContext channelContext = randomBoolean() ? 
mock(SocketChannelContext.class) : mock(ServerChannelContext.class);
+        TestSelectionKey attachment = new TestSelectionKey(0);
+        when(channelContext.getSelectionKey()).thenReturn(attachment);
+        attachment.attach(channelContext);
         handler.handleRegistration(channelContext);
         verify(channelContext).register();
     }

-    public void testRegisterNonServerAddsOP_CONNECTAndOP_READInterest() throws IOException {
+    public void testActiveNonServerAddsOP_CONNECTAndOP_READInterest() throws IOException {
         SocketChannelContext context = mock(SocketChannelContext.class);
         when(context.getSelectionKey()).thenReturn(new TestSelectionKey(0));
-        handler.handleRegistration(context);
+        handler.handleActive(context);
         assertEquals(SelectionKey.OP_READ | SelectionKey.OP_CONNECT, context.getSelectionKey().interestOps());
     }

-    public void testRegisterAddsAttachment() throws IOException {
-        ChannelContext<?> context = randomBoolean() ? mock(SocketChannelContext.class) : mock(ServerChannelContext.class);
-        when(context.getSelectionKey()).thenReturn(new TestSelectionKey(0));
-        handler.handleRegistration(context);
-        assertEquals(context, context.getSelectionKey().attachment());
-    }
-
-    public void testHandleServerRegisterSetsOP_ACCEPTInterest() throws IOException {
-        assertNull(serverContext.getSelectionKey());
-
-        handler.handleRegistration(serverContext);
+    public void testHandleServerActiveSetsOP_ACCEPTInterest() throws IOException {
+        ServerChannelContext serverContext = mock(ServerChannelContext.class);
+        when(serverContext.getSelectionKey()).thenReturn(new TestSelectionKey(0));
+        handler.handleActive(serverContext);
         assertEquals(SelectionKey.OP_ACCEPT, serverContext.getSelectionKey().interestOps());
     }
@@ -141,11 +134,11 @@ public void testAcceptExceptionCallsExceptionHandler() throws IOException {
         verify(serverChannelContext).handleException(exception);
     }

-    public void testRegisterWithPendingWritesAddsOP_CONNECTAndOP_READAndOP_WRITEInterest() throws IOException {
+    public void testActiveWithPendingWritesAddsOP_CONNECTAndOP_READAndOP_WRITEInterest() throws IOException {
         FlushReadyWrite flushReadyWrite = mock(FlushReadyWrite.class);
         when(readWriteHandler.writeToBytes(flushReadyWrite)).thenReturn(Collections.singletonList(flushReadyWrite));
         context.queueWriteOperation(flushReadyWrite);
-        handler.handleRegistration(context);
+        handler.handleActive(context);
         assertEquals(SelectionKey.OP_READ | SelectionKey.OP_CONNECT | SelectionKey.OP_WRITE, context.getSelectionKey().interestOps());
     }
@@ -266,7 +259,9 @@ private class DoNotRegisterSocketContext extends BytesChannelContext {

         @Override
         public void register() {
-            setSelectionKey(new TestSelectionKey(0));
+            TestSelectionKey selectionKey = new TestSelectionKey(0);
+            setSelectionKey(selectionKey);
+            selectionKey.attach(this);
         }
     }
@@ -280,7 +275,9 @@ private class DoNotRegisterServerContext extends ServerChannelContext {

         @Override
         public void register() {
-            setSelectionKey(new TestSelectionKey(0));
+            TestSelectionKey selectionKey = new TestSelectionKey(0);
+            setSelectionKey(selectionKey);
+            selectionKey.attach(this);
         }
     }
 }
diff --git a/libs/nio/src/test/java/org/elasticsearch/nio/NioSelectorTests.java b/libs/nio/src/test/java/org/elasticsearch/nio/NioSelectorTests.java
index 55d2e645cadee..fe2039ec419e0 100644
--- a/libs/nio/src/test/java/org/elasticsearch/nio/NioSelectorTests.java
+++ b/libs/nio/src/test/java/org/elasticsearch/nio/NioSelectorTests.java
@@ -28,7 +28,6 @@

 import java.io.IOException;
 import java.nio.ByteBuffer;
-import java.nio.channels.CancelledKeyException;
 import java.nio.channels.ClosedChannelException; 
import java.nio.channels.ClosedSelectorException; import java.nio.channels.SelectionKey; @@ -213,6 +212,7 @@ public void testRegisteredChannel() throws IOException { selector.preSelect(); verify(eventHandler).handleRegistration(serverChannelContext); + verify(eventHandler).handleActive(serverChannelContext); } public void testClosedServerChannelWillNotBeRegistered() { @@ -231,7 +231,20 @@ public void testRegisterServerChannelFailsDueToException() throws Exception { selector.preSelect(); + verify(eventHandler, times(0)).handleActive(serverChannelContext); verify(eventHandler).registrationException(serverChannelContext, closedChannelException); + verify(eventHandler).handleClose(serverChannelContext); + } + + public void testChannelActiveException() throws Exception { + executeOnNewThread(() -> selector.scheduleForRegistration(serverChannel)); + IOException ioException = new IOException(); + doThrow(ioException).when(eventHandler).handleActive(serverChannelContext); + + selector.preSelect(); + + verify(eventHandler).handleActive(serverChannelContext); + verify(eventHandler).activeException(serverChannelContext, ioException); } public void testClosedSocketChannelWillNotBeRegistered() throws Exception { @@ -242,6 +255,7 @@ public void testClosedSocketChannelWillNotBeRegistered() throws Exception { verify(eventHandler).registrationException(same(channelContext), any(ClosedChannelException.class)); verify(eventHandler, times(0)).handleConnect(channelContext); + verify(eventHandler).handleClose(channelContext); } public void testRegisterSocketChannelFailsDueToException() throws InterruptedException { @@ -254,7 +268,9 @@ public void testRegisterSocketChannelFailsDueToException() throws InterruptedExc selector.preSelect(); verify(eventHandler).registrationException(channelContext, closedChannelException); + verify(eventHandler, times(0)).handleActive(serverChannelContext); verify(eventHandler, times(0)).handleConnect(channelContext); + verify(eventHandler).handleClose(channelContext); }); } @@ -314,19 +330,15 @@ public void testQueueWriteChannelIsClosed() throws Exception { verify(listener).accept(isNull(Void.class), any(ClosedChannelException.class)); } - public void testQueueWriteSelectionKeyThrowsException() throws Exception { - SelectionKey selectionKey = mock(SelectionKey.class); - + public void testQueueWriteChannelIsUnregistered() throws Exception { WriteOperation writeOperation = new FlushReadyWrite(channelContext, buffers, listener); - CancelledKeyException cancelledKeyException = new CancelledKeyException(); - executeOnNewThread(() -> selector.queueWrite(writeOperation)); - when(channelContext.getSelectionKey()).thenReturn(selectionKey); - when(selectionKey.interestOps(anyInt())).thenThrow(cancelledKeyException); + executeOnNewThread(() -> selector.queueWrite(writeOperation)); + when(channelContext.getSelectionKey()).thenReturn(null); selector.preSelect(); verify(channelContext, times(0)).queueWriteOperation(writeOperation); - verify(listener).accept(null, cancelledKeyException); + verify(listener).accept(isNull(Void.class), any(IllegalStateException.class)); } public void testQueueWriteSuccessful() throws Exception { @@ -338,52 +350,40 @@ public void testQueueWriteSuccessful() throws Exception { selector.preSelect(); verify(channelContext).queueWriteOperation(writeOperation); - assertTrue((selectionKey.interestOps() & SelectionKey.OP_WRITE) != 0); } public void testQueueDirectlyInChannelBufferSuccessful() throws Exception { WriteOperation writeOperation = new 
FlushReadyWrite(channelContext, buffers, listener); - assertEquals(0, (selectionKey.interestOps() & SelectionKey.OP_WRITE)); - when(channelContext.readyForFlush()).thenReturn(true); selector.queueWrite(writeOperation); verify(channelContext).queueWriteOperation(writeOperation); verify(eventHandler, times(0)).handleWrite(channelContext); verify(eventHandler, times(0)).postHandling(channelContext); - assertTrue((selectionKey.interestOps() & SelectionKey.OP_WRITE) != 0); } public void testShouldFlushIfNoPendingFlushes() throws Exception { WriteOperation writeOperation = new FlushReadyWrite(channelContext, buffers, listener); - assertEquals(0, (selectionKey.interestOps() & SelectionKey.OP_WRITE)); - when(channelContext.readyForFlush()).thenReturn(false); selector.queueWrite(writeOperation); verify(channelContext).queueWriteOperation(writeOperation); verify(eventHandler).handleWrite(channelContext); verify(eventHandler).postHandling(channelContext); - assertTrue((selectionKey.interestOps() & SelectionKey.OP_WRITE) != 0); } - public void testQueueDirectlyInChannelBufferSelectionKeyThrowsException() throws Exception { - SelectionKey selectionKey = mock(SelectionKey.class); - + public void testShouldNotFlushIfChannelNotConnectedPendingFlushes() throws Exception { WriteOperation writeOperation = new FlushReadyWrite(channelContext, buffers, listener); - CancelledKeyException cancelledKeyException = new CancelledKeyException(); - when(channelContext.getSelectionKey()).thenReturn(selectionKey); when(channelContext.readyForFlush()).thenReturn(false); - when(selectionKey.interestOps(anyInt())).thenThrow(cancelledKeyException); + when(channelContext.isConnectComplete()).thenReturn(false); selector.queueWrite(writeOperation); - verify(channelContext, times(0)).queueWriteOperation(writeOperation); + verify(channelContext).queueWriteOperation(writeOperation); verify(eventHandler, times(0)).handleWrite(channelContext); - verify(eventHandler, times(0)).postHandling(channelContext); - verify(listener).accept(null, cancelledKeyException); + verify(eventHandler).postHandling(channelContext); } public void testConnectEvent() throws Exception { diff --git a/libs/nio/src/test/java/org/elasticsearch/nio/SocketChannelContextTests.java b/libs/nio/src/test/java/org/elasticsearch/nio/SocketChannelContextTests.java index 5563ccc43063b..210a27aa109fa 100644 --- a/libs/nio/src/test/java/org/elasticsearch/nio/SocketChannelContextTests.java +++ b/libs/nio/src/test/java/org/elasticsearch/nio/SocketChannelContextTests.java @@ -53,7 +53,7 @@ public class SocketChannelContextTests extends ESTestCase { private NioSocketChannel channel; private BiConsumer listener; private NioSelector selector; - private NioChannelHandler readWriteHandler; + private NioChannelHandler handler; private ByteBuffer ioBuffer = ByteBuffer.allocate(1024); @SuppressWarnings("unchecked") @@ -67,9 +67,9 @@ public void setup() throws Exception { when(channel.getRawChannel()).thenReturn(rawChannel); exceptionHandler = mock(Consumer.class); selector = mock(NioSelector.class); - readWriteHandler = mock(NioChannelHandler.class); + handler = mock(NioChannelHandler.class); InboundChannelBuffer channelBuffer = InboundChannelBuffer.allocatingInstance(); - context = new TestSocketChannelContext(channel, selector, exceptionHandler, readWriteHandler, channelBuffer); + context = new TestSocketChannelContext(channel, selector, exceptionHandler, handler, channelBuffer); when(selector.isOnCurrentThread()).thenReturn(true); 
when(selector.getIoBuffer()).thenAnswer(invocationOnMock -> { @@ -142,6 +142,11 @@ public void testConnectFails() throws IOException { assertSame(ioException, exception.get()); } + public void testChannelActiveCallsHandler() throws IOException { + context.channelActive(); + verify(handler).channelActive(); + } + public void testWriteFailsIfClosing() { context.closeChannel(); @@ -158,7 +163,7 @@ public void testSendMessageFromDifferentThreadIsQueuedWithSelector() throws Exce ByteBuffer[] buffers = {ByteBuffer.wrap(createMessage(10))}; WriteOperation writeOperation = mock(WriteOperation.class); - when(readWriteHandler.createWriteOperation(context, buffers, listener)).thenReturn(writeOperation); + when(handler.createWriteOperation(context, buffers, listener)).thenReturn(writeOperation); context.sendMessage(buffers, listener); verify(selector).queueWrite(writeOpCaptor.capture()); @@ -172,7 +177,7 @@ public void testSendMessageFromSameThreadIsQueuedInChannel() { ByteBuffer[] buffers = {ByteBuffer.wrap(createMessage(10))}; WriteOperation writeOperation = mock(WriteOperation.class); - when(readWriteHandler.createWriteOperation(context, buffers, listener)).thenReturn(writeOperation); + when(handler.createWriteOperation(context, buffers, listener)).thenReturn(writeOperation); context.sendMessage(buffers, listener); verify(selector).queueWrite(writeOpCaptor.capture()); @@ -186,16 +191,16 @@ public void testWriteIsQueuedInChannel() { ByteBuffer[] buffer = {ByteBuffer.allocate(10)}; FlushReadyWrite writeOperation = new FlushReadyWrite(context, buffer, listener); - when(readWriteHandler.writeToBytes(writeOperation)).thenReturn(Collections.singletonList(writeOperation)); + when(handler.writeToBytes(writeOperation)).thenReturn(Collections.singletonList(writeOperation)); context.queueWriteOperation(writeOperation); - verify(readWriteHandler).writeToBytes(writeOperation); + verify(handler).writeToBytes(writeOperation); assertTrue(context.readyForFlush()); } public void testHandleReadBytesWillCheckForNewFlushOperations() throws IOException { assertFalse(context.readyForFlush()); - when(readWriteHandler.pollFlushOperations()).thenReturn(Collections.singletonList(mock(FlushOperation.class))); + when(handler.pollFlushOperations()).thenReturn(Collections.singletonList(mock(FlushOperation.class))); context.handleReadBytes(); assertTrue(context.readyForFlush()); } @@ -205,14 +210,14 @@ public void testFlushOpsClearedOnClose() throws Exception { try (SocketChannel realChannel = SocketChannel.open()) { when(channel.getRawChannel()).thenReturn(realChannel); InboundChannelBuffer channelBuffer = InboundChannelBuffer.allocatingInstance(); - context = new TestSocketChannelContext(channel, selector, exceptionHandler, readWriteHandler, channelBuffer); + context = new TestSocketChannelContext(channel, selector, exceptionHandler, handler, channelBuffer); assertFalse(context.readyForFlush()); ByteBuffer[] buffer = {ByteBuffer.allocate(10)}; WriteOperation writeOperation = mock(WriteOperation.class); BiConsumer listener2 = mock(BiConsumer.class); - when(readWriteHandler.writeToBytes(writeOperation)).thenReturn(Arrays.asList(new FlushOperation(buffer, listener), + when(handler.writeToBytes(writeOperation)).thenReturn(Arrays.asList(new FlushOperation(buffer, listener), new FlushOperation(buffer, listener2))); context.queueWriteOperation(writeOperation); @@ -233,7 +238,7 @@ public void testWillPollForFlushOpsToClose() throws Exception { try (SocketChannel realChannel = SocketChannel.open()) { 
when(channel.getRawChannel()).thenReturn(realChannel); InboundChannelBuffer channelBuffer = InboundChannelBuffer.allocatingInstance(); - context = new TestSocketChannelContext(channel, selector, exceptionHandler, readWriteHandler, channelBuffer); + context = new TestSocketChannelContext(channel, selector, exceptionHandler, handler, channelBuffer); ByteBuffer[] buffer = {ByteBuffer.allocate(10)}; @@ -241,7 +246,7 @@ public void testWillPollForFlushOpsToClose() throws Exception { assertFalse(context.readyForFlush()); when(channel.isOpen()).thenReturn(true); - when(readWriteHandler.pollFlushOperations()).thenReturn(Arrays.asList(new FlushOperation(buffer, listener), + when(handler.pollFlushOperations()).thenReturn(Arrays.asList(new FlushOperation(buffer, listener), new FlushOperation(buffer, listener2))); context.closeFromSelector(); @@ -257,9 +262,9 @@ public void testCloseClosesWriteProducer() throws IOException { when(channel.getRawChannel()).thenReturn(realChannel); when(channel.isOpen()).thenReturn(true); InboundChannelBuffer buffer = InboundChannelBuffer.allocatingInstance(); - BytesChannelContext context = new BytesChannelContext(channel, selector, exceptionHandler, readWriteHandler, buffer); + BytesChannelContext context = new BytesChannelContext(channel, selector, exceptionHandler, handler, buffer); context.closeFromSelector(); - verify(readWriteHandler).close(); + verify(handler).close(); } } @@ -271,7 +276,7 @@ public void testCloseClosesChannelBuffer() throws IOException { IntFunction pageAllocator = (n) -> new Page(ByteBuffer.allocate(n), closer); InboundChannelBuffer buffer = new InboundChannelBuffer(pageAllocator); buffer.ensureCapacity(1); - TestSocketChannelContext context = new TestSocketChannelContext(channel, selector, exceptionHandler, readWriteHandler, buffer); + TestSocketChannelContext context = new TestSocketChannelContext(channel, selector, exceptionHandler, handler, buffer); context.closeFromSelector(); verify(closer).run(); } diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/MapXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/MapXContentParser.java new file mode 100644 index 0000000000000..c54e71634d6ed --- /dev/null +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/MapXContentParser.java @@ -0,0 +1,440 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.common.xcontent.support;
+
+import org.elasticsearch.common.xcontent.DeprecationHandler;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.XContentLocation;
+import org.elasticsearch.common.xcontent.XContentType;
+
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.nio.CharBuffer;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Wraps a map generated by XContentParser's map() method into an XContentParser
+ */
+public class MapXContentParser extends AbstractXContentParser {
+
+    private XContentType xContentType;
+    private TokenIterator iterator;
+    private boolean closed;
+
+    public MapXContentParser(NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, Map<String, Object> map,
+                             XContentType xContentType) {
+        super(xContentRegistry, deprecationHandler);
+        this.xContentType = xContentType;
+        this.iterator = new MapIterator(null, null, map);
+    }
+
+
+    @Override
+    protected boolean doBooleanValue() throws IOException {
+        if (iterator != null && iterator.currentValue() instanceof Boolean) {
+            return (Boolean) iterator.currentValue();
+        } else {
+            throw new IllegalStateException("Cannot get boolean value for the current token " + currentToken());
+        }
+    }
+
+    @Override
+    protected short doShortValue() throws IOException {
+        return numberValue().shortValue();
+    }
+
+    @Override
+    protected int doIntValue() throws IOException {
+        return numberValue().intValue();
+    }
+
+    @Override
+    protected long doLongValue() throws IOException {
+        return numberValue().longValue();
+    }
+
+    @Override
+    protected float doFloatValue() throws IOException {
+        return numberValue().floatValue();
+    }
+
+    @Override
+    protected double doDoubleValue() throws IOException {
+        return numberValue().doubleValue();
+    }
+
+    @Override
+    public XContentType contentType() {
+        return xContentType;
+    }
+
+    @Override
+    public Token nextToken() throws IOException {
+        if (iterator == null) {
+            return null;
+        } else {
+            iterator = iterator.next();
+        }
+        return currentToken();
+    }
+
+    @Override
+    public void skipChildren() throws IOException {
+        Token token = currentToken();
+        if (token == Token.START_OBJECT || token == Token.START_ARRAY) {
+            iterator = iterator.skipChildren();
+        }
+    }
+
+    @Override
+    public Token currentToken() {
+        if (iterator == null) {
+            return null;
+        } else {
+            return iterator.currentToken();
+        }
+    }
+
+    @Override
+    public String currentName() throws IOException {
+        if (iterator == null) {
+            return null;
+        } else {
+            return iterator.currentName();
+        }
+    }
+
+    @Override
+    public String text() throws IOException {
+        if (iterator != null) {
+            if (currentToken() == Token.VALUE_STRING || currentToken() == Token.VALUE_NUMBER || currentToken() == Token.VALUE_BOOLEAN) {
+                return iterator.currentValue().toString();
+            } else if (currentToken() == Token.FIELD_NAME) {
+                return iterator.currentName();
+            } else {
+                return null;
+            }
+        } else {
+            throw new IllegalStateException("Cannot get text for the current token " + currentToken());
+        }
+    }
+
+    @Override
+    public CharBuffer charBuffer() throws IOException {
+        throw new UnsupportedOperationException("use text() instead");
+    }
+
+    @Override
+    public Object objectText() throws IOException {
+        throw new UnsupportedOperationException("use text() instead");
+    }
+
+    @Override
+    public Object objectBytes() throws IOException {
+        throw new UnsupportedOperationException("use text() instead");
+    }
+
+ 
@Override + public boolean hasTextCharacters() { + throw new UnsupportedOperationException("use text() instead"); + } + + @Override + public char[] textCharacters() throws IOException { + throw new UnsupportedOperationException("use text() instead"); + } + + @Override + public int textLength() throws IOException { + throw new UnsupportedOperationException("use text() instead"); + } + + @Override + public int textOffset() throws IOException { + throw new UnsupportedOperationException("use text() instead"); + } + + @Override + public Number numberValue() throws IOException { + if (iterator != null && currentToken() == Token.VALUE_NUMBER) { + return (Number) iterator.currentValue(); + } else { + throw new IllegalStateException("Cannot get numeric value for the current token " + currentToken()); + } + } + + @Override + public NumberType numberType() throws IOException { + Number number = numberValue(); + if (number instanceof Integer) { + return NumberType.INT; + } else if (number instanceof BigInteger) { + return NumberType.BIG_INTEGER; + } else if (number instanceof Long) { + return NumberType.LONG; + } else if (number instanceof Float) { + return NumberType.FLOAT; + } else if (number instanceof Double) { + return NumberType.DOUBLE; + } else if (number instanceof BigDecimal) { + return NumberType.BIG_DECIMAL; + } + throw new IllegalStateException("No matching token for number_type [" + number.getClass() + "]"); + } + + @Override + public byte[] binaryValue() throws IOException { + if (iterator != null && iterator.currentValue() instanceof byte[]) { + return (byte[]) iterator.currentValue(); + } else { + throw new IllegalStateException("Cannot get binary value for the current token " + currentToken()); + } + } + + @Override + public XContentLocation getTokenLocation() { + return new XContentLocation(0, 0); + } + + @Override + public boolean isClosed() { + return closed; + } + + @Override + public void close() throws IOException { + closed = true; + } + + /** + * Iterator over the elements of the map + */ + private abstract static class TokenIterator { + protected final TokenIterator parent; + protected final String name; + protected Token currentToken; + protected State state = State.BEFORE; + + TokenIterator(TokenIterator parent, String name) { + this.parent = parent; + this.name = name; + } + + public abstract TokenIterator next(); + + public abstract TokenIterator skipChildren(); + + public Token currentToken() { + return currentToken; + } + + public abstract Object currentValue(); + + /** + * name of the field name of the current element + */ + public abstract String currentName(); + + /** + * field name that the child element needs to inherit. + * + * In most cases this is the same as currentName() except with embedded arrays. In "foo": [[42]] the first START_ARRAY + * token will have the name "foo", but the second START_ARRAY will have no name. 
+         */
+        public abstract String childName();
+
+        @SuppressWarnings("unchecked")
+        TokenIterator processValue(Object value) {
+            if (value instanceof Map) {
+                return new MapIterator(this, childName(), (Map<String, Object>) value).next();
+            } else if (value instanceof List) {
+                return new ArrayIterator(this, childName(), (List<Object>) value).next();
+            } else if (value instanceof Number) {
+                currentToken = Token.VALUE_NUMBER;
+            } else if (value instanceof String) {
+                currentToken = Token.VALUE_STRING;
+            } else if (value instanceof Boolean) {
+                currentToken = Token.VALUE_BOOLEAN;
+            } else if (value instanceof byte[]) {
+                currentToken = Token.VALUE_EMBEDDED_OBJECT;
+            } else if (value == null) {
+                currentToken = Token.VALUE_NULL;
+            }
+            return this;
+        }
+
+    }
+
+    private enum State {
+        BEFORE,
+        NAME,
+        VALUE,
+        AFTER
+    }
+
+    /**
+     * Iterator over the map
+     */
+    private static class MapIterator extends TokenIterator {
+
+        private final Iterator<Map.Entry<String, Object>> iterator;
+
+        private Map.Entry<String, Object> entry;
+
+        MapIterator(TokenIterator parent, String name, Map<String, Object> map) {
+            super(parent, name);
+            iterator = map.entrySet().iterator();
+        }
+
+        @Override
+        public TokenIterator next() {
+            switch (state) {
+                case BEFORE:
+                    state = State.NAME;
+                    currentToken = Token.START_OBJECT;
+                    return this;
+                case NAME:
+                    if (iterator.hasNext()) {
+                        state = State.VALUE;
+                        entry = iterator.next();
+                        currentToken = Token.FIELD_NAME;
+                        return this;
+                    } else {
+                        state = State.AFTER;
+                        entry = null;
+                        currentToken = Token.END_OBJECT;
+                        return this;
+                    }
+                case VALUE:
+                    state = State.NAME;
+                    return processValue(entry.getValue());
+                case AFTER:
+                    currentToken = null;
+                    if (parent == null) {
+                        return null;
+                    } else {
+                        return parent.next();
+                    }
+                default:
+                    throw new IllegalArgumentException("Unknown state " + state);
+
+            }
+        }
+
+        @Override
+        public TokenIterator skipChildren() {
+            state = State.AFTER;
+            entry = null;
+            currentToken = Token.END_OBJECT;
+            return this;
+        }
+
+        @Override
+        public Object currentValue() {
+            if (entry == null) {
+                throw new IllegalStateException("Cannot get value for non-value token " + currentToken);
+            }
+            return entry.getValue();
+        }
+
+        @Override
+        public String currentName() {
+            if (entry == null) {
+                return name;
+            }
+            return entry.getKey();
+        }
+
+        @Override
+        public String childName() {
+            return currentName();
+        }
+    }
+
+    private static class ArrayIterator extends TokenIterator {
+        private final Iterator<Object> iterator;
+
+        private Object value;
+
+        private ArrayIterator(TokenIterator parent, String name, List<Object> list) {
+            super(parent, name);
+            iterator = list.iterator();
+        }
+
+        @Override
+        public TokenIterator next() {
+            switch (state) {
+                case BEFORE:
+                    state = State.VALUE;
+                    currentToken = Token.START_ARRAY;
+                    return this;
+                case VALUE:
+                    if (iterator.hasNext()) {
+                        value = iterator.next();
+                        return processValue(value);
+                    } else {
+                        state = State.AFTER;
+                        value = null;
+                        currentToken = Token.END_ARRAY;
+                        return this;
+                    }
+                case AFTER:
+                    currentToken = null;
+                    if (parent == null) {
+                        return null;
+                    } else {
+                        return parent.next();
+                    }
+                default:
+                    throw new IllegalArgumentException("Unknown state " + state);
+            }
+        }
+
+        @Override
+        public TokenIterator skipChildren() {
+            state = State.AFTER;
+            value = null;
+            currentToken = Token.END_ARRAY;
+            return this;
+        }
+
+        @Override
+        public Object currentValue() {
+            return value;
+        }
+
+        @Override
+        public String currentName() {
+            if (parent == null || (currentToken != Token.START_ARRAY && currentToken != Token.END_ARRAY)) {
+                return null;
+            } else {
+                return name;
+            }
+        }
+
+        @Override
+        public String 
childName() { + return null; + } + } +} diff --git a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/MapXContentParserTests.java b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/MapXContentParserTests.java new file mode 100644 index 0000000000000..0d2113152ebe8 --- /dev/null +++ b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/MapXContentParserTests.java @@ -0,0 +1,147 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.xcontent; + +import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.support.MapXContentParser; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentParserTests.generateRandomObject; + +public class MapXContentParserTests extends ESTestCase { + + public void testSimpleMap() throws IOException { + compareTokens(builder -> { + builder.startObject(); + builder.field("string", "foo"); + builder.field("number", 42); + builder.field("double", 42.5); + builder.field("bool", false); + builder.startArray("arr"); + { + builder.value(10).value(20.0).value("30"); + builder.startArray(); + builder.value(30); + builder.endArray(); + } + builder.endArray(); + builder.startArray("nested_arr"); + { + builder.startArray(); + builder.value(10); + builder.endArray(); + } + builder.endArray(); + builder.startObject("obj"); + { + builder.field("inner_string", "bar"); + builder.startObject("inner_empty_obj"); + builder.field("f", "a"); + builder.endObject(); + } + builder.endObject(); + builder.field("bytes", new byte[]{1, 2, 3}); + builder.nullField("nothing"); + builder.endObject(); + }); + } + + + public void testRandomObject() throws IOException { + compareTokens(builder -> generateRandomObject(builder, randomIntBetween(0, 10))); + } + + public void compareTokens(CheckedConsumer consumer) throws IOException { + final XContentType xContentType = randomFrom(XContentType.values()); + try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) { + consumer.accept(builder); + final Map map; + try (XContentParser parser = createParser(xContentType.xContent(), BytesReference.bytes(builder))) { + map = parser.mapOrdered(); + } + + try (XContentParser parser = createParser(xContentType.xContent(), BytesReference.bytes(builder))) { + try (XContentParser mapParser = new MapXContentParser( + xContentRegistry(), LoggingDeprecationHandler.INSTANCE, map, xContentType)) { + assertEquals(parser.contentType(), mapParser.contentType()); + XContentParser.Token token; + assertEquals(parser.currentToken(), mapParser.currentToken()); + assertEquals(parser.currentName(), 
mapParser.currentName()); + do { + token = parser.nextToken(); + XContentParser.Token mapToken = mapParser.nextToken(); + assertEquals(token, mapToken); + assertEquals(parser.currentName(), mapParser.currentName()); + if (token != null && (token.isValue() || token == XContentParser.Token.VALUE_NULL)) { + assertEquals(parser.textOrNull(), mapParser.textOrNull()); + switch (token) { + case VALUE_STRING: + assertEquals(parser.text(), mapParser.text()); + break; + case VALUE_NUMBER: + assertEquals(parser.numberType(), mapParser.numberType()); + assertEquals(parser.numberValue(), mapParser.numberValue()); + if (parser.numberType() == XContentParser.NumberType.LONG || + parser.numberType() == XContentParser.NumberType.INT) { + assertEquals(parser.longValue(), mapParser.longValue()); + if (parser.longValue() <= Integer.MAX_VALUE && parser.longValue() >= Integer.MIN_VALUE) { + assertEquals(parser.intValue(), mapParser.intValue()); + if (parser.longValue() <= Short.MAX_VALUE && parser.longValue() >= Short.MIN_VALUE) { + assertEquals(parser.shortValue(), mapParser.shortValue()); + } + } + } else { + assertEquals(parser.doubleValue(), mapParser.doubleValue(), 0.000001); + } + break; + case VALUE_BOOLEAN: + assertEquals(parser.booleanValue(), mapParser.booleanValue()); + break; + case VALUE_EMBEDDED_OBJECT: + assertArrayEquals(parser.binaryValue(), mapParser.binaryValue()); + break; + case VALUE_NULL: + assertNull(mapParser.textOrNull()); + break; + } + assertEquals(parser.currentName(), mapParser.currentName()); + assertEquals(parser.isClosed(), mapParser.isClosed()); + } else if (token == XContentParser.Token.START_ARRAY || token == XContentParser.Token.START_OBJECT) { + if (randomInt(5) == 0) { + parser.skipChildren(); + mapParser.skipChildren(); + } + } + } while (token != null); + assertEquals(parser.nextToken(), mapParser.nextToken()); + parser.close(); + mapParser.close(); + assertEquals(parser.isClosed(), mapParser.isClosed()); + assertTrue(mapParser.isClosed()); + } + } + + } + } +} diff --git a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java index 31a00c4025ab2..0cfa01876c590 100644 --- a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java @@ -504,7 +504,7 @@ public void testCreateRootSubParser() throws IOException { * * Returns the number of tokens in the marked field */ - private int generateRandomObjectForMarking(XContentBuilder builder) throws IOException { + private static int generateRandomObjectForMarking(XContentBuilder builder) throws IOException { builder.startObject() .field("first_field", "foo") .field("marked_field"); @@ -513,7 +513,7 @@ private int generateRandomObjectForMarking(XContentBuilder builder) throws IOExc return numberOfTokens; } - private int generateRandomObject(XContentBuilder builder, int level) throws IOException { + public static int generateRandomObject(XContentBuilder builder, int level) throws IOException { int tokens = 2; builder.startObject(); int numberOfElements = randomInt(5); @@ -525,7 +525,7 @@ private int generateRandomObject(XContentBuilder builder, int level) throws IOEx return tokens; } - private int generateRandomValue(XContentBuilder builder, int level) throws IOException { + private static int generateRandomValue(XContentBuilder builder, int level) throws IOException { 
@SuppressWarnings("unchecked") CheckedSupplier<Integer, IOException> fieldGenerator = randomFrom( () -> { builder.value(randomInt()); @@ -560,7 +560,7 @@ private int generateRandomValue(XContentBuilder builder, int level) throws IOExc return fieldGenerator.get(); } - private int generateRandomArray(XContentBuilder builder, int level) throws IOException { + private static int generateRandomArray(XContentBuilder builder, int level) throws IOException { int tokens = 2; int arraySize = randomInt(3); builder.startArray(); diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/RunningStats.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/RunningStats.java index 1be3279e8eaf1..15fe158498cc9 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/RunningStats.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/RunningStats.java @@ -82,19 +82,28 @@ public RunningStats(StreamInput in) throws IOException { // read doc count docCount = (Long)in.readGenericValue(); // read fieldSum - fieldSum = (HashMap<String, Double>)in.readGenericValue(); + fieldSum = convertIfNeeded((Map<String, Double>)in.readGenericValue()); // counts - counts = (HashMap<String, Long>)in.readGenericValue(); + counts = convertIfNeeded((Map<String, Long>)in.readGenericValue()); // means - means = (HashMap<String, Double>)in.readGenericValue(); + means = convertIfNeeded((Map<String, Double>)in.readGenericValue()); // variances - variances = (HashMap<String, Double>)in.readGenericValue(); + variances = convertIfNeeded((Map<String, Double>)in.readGenericValue()); // skewness - skewness = (HashMap<String, Double>)in.readGenericValue(); + skewness = convertIfNeeded((Map<String, Double>)in.readGenericValue()); // kurtosis - kurtosis = (HashMap<String, Double>)in.readGenericValue(); + kurtosis = convertIfNeeded((Map<String, Double>)in.readGenericValue()); // read covariances - covariances = (HashMap<String, HashMap<String, Double>>)in.readGenericValue(); + covariances = convertIfNeeded((Map<String, HashMap<String, Double>>)in.readGenericValue()); + } + + // Convert Map to HashMap if it isn't + private static <K, V> HashMap<K, V> convertIfNeeded(Map<K, V> map) { + if (map instanceof HashMap) { + return (HashMap<K, V>) map; + } else { + return new HashMap<>(map); + } } @Override diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/60_synonym_graph.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/60_synonym_graph.yml index 7de297bb4e215..ae039e453be6c 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/60_synonym_graph.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/60_synonym_graph.yml @@ -27,6 +27,9 @@ setup: properties: field: type: text + phrase_field: + type: text + index_phrases: true - do: index: @@ -204,3 +207,26 @@ setup: - match: { hits.hits.2._id: "1" } - match: { hits.hits.3._id: "8" } - match: { hits.hits.4._id: "2" } + +--- +"index_phrases": + + - do: + index: + index: test + id: 9 + body: + phrase_field: "bar baz" + refresh: true + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + phrase_field: + query: bar baz + analyzer: lower_graph_syns + - match: { hits.total: 1 } +
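A note on the RunningStats change above: StreamInput#readGenericValue only promises some Map implementation, so the previous blind casts to HashMap could fail with a ClassCastException when a different Map (for example an unmodifiable one) came off the wire; convertIfNeeded copies only when needed. A small illustration of that contract, a sketch only, with hypothetical sample values:

    Map<String, Double> wire = Collections.unmodifiableMap(Collections.singletonMap("field", 1.0));
    HashMap<String, Double> owned = convertIfNeeded(wire);  // not a HashMap: defensively copied, now mutable
    owned.put("other", 2.0);
    assert convertIfNeeded(owned) == owned;                 // already a HashMap: returned as-is, no copy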
diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java index b086deb96fe18..a6965424740b2 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java @@ -22,7 +22,7 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.node.NodeClient; @@ -45,21 +45,23 @@ import static org.elasticsearch.ingest.common.IngestCommonPlugin.GROK_PATTERNS; import static org.elasticsearch.rest.RestRequest.Method.GET; -public class GrokProcessorGetAction extends StreamableResponseActionType<GrokProcessorGetAction.Response> { +public class GrokProcessorGetAction extends ActionType<GrokProcessorGetAction.Response> { static final GrokProcessorGetAction INSTANCE = new GrokProcessorGetAction(); static final String NAME = "cluster:admin/ingest/processor/grok/get"; private GrokProcessorGetAction() { - super(NAME); - } - - @Override - public Response newResponse() { - return new Response(null); + super(NAME, Response::new); } public static class Request extends ActionRequest { + + public Request() {} + + Request(StreamInput in) throws IOException { + super(in); + } + @Override public ActionRequestValidationException validate() { return null; @@ -67,12 +69,17 @@ public ActionRequestValidationException validate() { } public static class Response extends ActionResponse implements ToXContentObject { - private Map<String, String> grokPatterns; + private final Map<String, String> grokPatterns; Response(Map<String, String> grokPatterns) { this.grokPatterns = grokPatterns; } + Response(StreamInput in) throws IOException { + super(in); + grokPatterns = in.readMap(StreamInput::readString, StreamInput::readString); + } + public Map<String, String> getGrokPatterns() { return grokPatterns; } @@ -86,12 +93,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - grokPatterns = in.readMap(StreamInput::readString, StreamInput::readString); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeMap(grokPatterns, StreamOutput::writeString, StreamOutput::writeString); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorGetActionTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorGetActionTests.java index cc8ca33161be4..f44284b39d1ba 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorGetActionTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorGetActionTests.java @@ -34,7 +34,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.core.IsNull.nullValue; - public class GrokProcessorGetActionTests extends ESTestCase { private static final Map<String, String> TEST_PATTERNS = Collections.singletonMap("PATTERN", "foo"); @@ -43,8 +42,7 @@ public void testRequest() throws Exception { BytesStreamOutput out = new BytesStreamOutput(); request.writeTo(out); StreamInput streamInput = out.bytes().streamInput(); - GrokProcessorGetAction.Request otherRequest = new GrokProcessorGetAction.Request(); - otherRequest.readFrom(streamInput); + GrokProcessorGetAction.Request otherRequest = new GrokProcessorGetAction.Request(streamInput); assertThat(otherRequest.validate(), nullValue()); } @@ -53,8 +51,7 @@ public void 
testResponseSerialization() throws Exception { BytesStreamOutput out = new BytesStreamOutput(); response.writeTo(out); StreamInput streamInput = out.bytes().streamInput(); - GrokProcessorGetAction.Response otherResponse = new GrokProcessorGetAction.Response(null); - otherResponse.readFrom(streamInput); + GrokProcessorGetAction.Response otherResponse = new GrokProcessorGetAction.Response(streamInput); assertThat(response.getGrokPatterns(), equalTo(TEST_PATTERNS)); assertThat(response.getGrokPatterns(), equalTo(otherResponse.getGrokPatterns())); } diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java index a962e8793970a..8d355ec874912 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java @@ -55,6 +55,9 @@ public class GeoIpProcessorFactoryTests extends ESTestCase { @BeforeClass public static void loadDatabaseReaders() throws IOException { + // there are still problems on windows + assumeFalse("https://github.com/elastic/elasticsearch/issues/44552", Constants.WINDOWS); + // Skip setup because Windows cannot cleanup these files properly. The reason is that they are using // a MappedByteBuffer which will keep the file mappings active until it is garbage-collected. As a consequence, // the corresponding file appears to be still in use and Windows cannot delete it. diff --git a/modules/lang-expression/licenses/lucene-expressions-8.1.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.1.0.jar.sha1 deleted file mode 100644 index 2554e8ce52652..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-8.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0c98e3b9d25f27ab05ac643cfb28756daa516bc7 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-8.2.0-snapshot-6413aae226.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.2.0-snapshot-6413aae226.jar.sha1 new file mode 100644 index 0000000000000..22c90f85ee030 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-8.2.0-snapshot-6413aae226.jar.sha1 @@ -0,0 +1 @@ +da9aba1dcaea004f04a37fd5c1b900a2347d4cdd \ No newline at end of file diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateAction.java index a096a89951e7c..44899e2363c3e 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.script.mustache; import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable; public class MultiSearchTemplateAction extends ActionType { @@ -28,11 +27,6 @@ public class MultiSearchTemplateAction extends ActionType getResponseReader() { - return MultiSearchTemplateResponse::new; + super(NAME, MultiSearchTemplateResponse::new); } } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java index c80f99484a947..495f163ea80af 
100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java @@ -47,6 +47,14 @@ public class MultiSearchTemplateRequest extends ActionRequest implements Composi private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpenAndForbidClosedIgnoreThrottled(); + public MultiSearchTemplateRequest() {} + + MultiSearchTemplateRequest(StreamInput in) throws IOException { + super(in); + maxConcurrentSearchRequests = in.readVInt(); + requests = in.readList(SearchTemplateRequest::new); + } + /** * Add a search template request to execute. Note, the order is important, the search response will be returned in the * same order as the search requests. @@ -115,18 +123,11 @@ public MultiSearchTemplateRequest indicesOptions(IndicesOptions indicesOptions) return this; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - maxConcurrentSearchRequests = in.readVInt(); - requests = in.readStreamableList(SearchTemplateRequest::new); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeVInt(maxConcurrentSearchRequests); - out.writeStreamableList(requests); + out.writeList(requests); } @Override diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java index a6a894f5980eb..614be00c4076f 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java @@ -146,11 +146,6 @@ public TimeValue getTook() { return new TimeValue(tookInMillis); } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(items.length); diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateAction.java index 2e8417c993990..fb124731420ed 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.script.mustache; import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable; public class SearchTemplateAction extends ActionType { @@ -28,11 +27,6 @@ public class SearchTemplateAction extends ActionType { public static final String NAME = "indices:data/read/search/template"; private SearchTemplateAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return SearchTemplateResponse::new; + super(NAME, SearchTemplateResponse::new); } } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java index da3cc3688149c..71df85b145c43 100644 --- 
a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java @@ -54,7 +54,19 @@ public class SearchTemplateRequest extends ActionRequest implements CompositeInd private String script; private Map scriptParams; - public SearchTemplateRequest() { + public SearchTemplateRequest() {} + + public SearchTemplateRequest(StreamInput in) throws IOException { + super(in); + request = in.readOptionalWriteable(SearchRequest::new); + simulate = in.readBoolean(); + explain = in.readBoolean(); + profile = in.readBoolean(); + scriptType = ScriptType.readFrom(in); + script = in.readOptionalString(); + if (in.readBoolean()) { + scriptParams = in.readMap(); + } } public SearchTemplateRequest(SearchRequest searchRequest) { @@ -217,24 +229,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws .endObject(); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - request = in.readOptionalWriteable(SearchRequest::new); - simulate = in.readBoolean(); - explain = in.readBoolean(); - profile = in.readBoolean(); - scriptType = ScriptType.readFrom(in); - script = in.readOptionalString(); - if (in.readBoolean()) { - scriptParams = in.readMap(); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeOptionalStreamable(request); + out.writeOptionalWriteable(request); out.writeBoolean(simulate); out.writeBoolean(explain); out.writeBoolean(profile); diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java index 20d16894dc935..80617ea5788f3 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java @@ -82,12 +82,7 @@ public String toString() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalBytesReference(source); - out.writeOptionalStreamable(response); - } - - @Override - public void readFrom(StreamInput in) { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + out.writeOptionalWriteable(response); } public static SearchTemplateResponse fromXContent(XContentParser parser) throws IOException { diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java index 061da2a0abaea..a78cfbd2b6eec 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java @@ -42,7 +42,6 @@ import java.io.IOException; import java.util.Collections; -import java.util.function.Supplier; public class TransportSearchTemplateAction extends HandledTransportAction { @@ -55,7 +54,7 @@ public class TransportSearchTemplateAction extends HandledTransportAction) SearchTemplateRequest::new); + super(SearchTemplateAction.NAME, transportService, actionFilters, SearchTemplateRequest::new); this.scriptService = scriptService; this.xContentRegistry = xContentRegistry; this.client = client; 
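The mustache changes above all follow the same Streamable-to-Writeable recipe that repeats through the rest of this diff: drop readFrom(StreamInput), add a StreamInput constructor that reads fields in exactly the order writeTo writes them, and hand the reader to the ActionType super-constructor. A generic sketch of the target shape; the class and field names here are hypothetical, not part of this change:

    public class ExampleResponse extends ActionResponse {
        private final Map<String, Object> payload;

        ExampleResponse(StreamInput in) throws IOException {
            super(in);
            payload = in.readMap();  // must mirror writeTo below, field for field
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeMap(payload);
        }
    }

    // registration then becomes: super(NAME, ExampleResponse::new);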
diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateRequestTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateRequestTests.java index b91054ee131d2..47329828b0014 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateRequestTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateRequestTests.java @@ -19,10 +19,11 @@ package org.elasticsearch.script.mustache; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.RandomSearchRequestGenerator; import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.test.AbstractWireSerializingTestCase; import java.io.IOException; import java.util.ArrayList; @@ -31,16 +32,16 @@ import java.util.Map; import java.util.function.Consumer; -public class SearchTemplateRequestTests extends AbstractStreamableTestCase { +public class SearchTemplateRequestTests extends AbstractWireSerializingTestCase { @Override - protected SearchTemplateRequest createBlankInstance() { - return new SearchTemplateRequest(); + protected SearchTemplateRequest createTestInstance() { + return createRandomRequest(); } @Override - protected SearchTemplateRequest createTestInstance() { - return createRandomRequest(); + protected Writeable.Reader instanceReader() { + return SearchTemplateRequest::new; } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextAction.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextAction.java index a50ba2c0f8fdd..9fe7c92bb4daa 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextAction.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextAction.java @@ -72,12 +72,7 @@ public class PainlessContextAction extends ActionType getResponseReader() { - return Response::new; + super(NAME, PainlessContextAction.Response::new); } public static class Request extends ActionRequest { @@ -106,11 +101,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public void readFrom(StreamInput in) { - throw new UnsupportedOperationException(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -139,11 +129,6 @@ public Response(StreamInput in) throws IOException { painlessContextInfo = in.readOptionalWriteable(PainlessContextInfo::new); } - @Override - public void readFrom(StreamInput in) { - throw new UnsupportedOperationException(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeStringCollection(scriptContextNames); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java index c7f468a57590e..0097aa4971040 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java @@ -95,12 +95,7 @@ public class PainlessExecuteAction extends ActionType getResponseReader() { - return Response::new; + super(NAME, Response::new); } public static class Request extends 
SingleShardRequest implements ToXContentObject { @@ -380,11 +375,6 @@ public Object getResult() { return result; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeGenericValue(result); diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/RankFeatureFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/RankFeatureFieldMapper.java index 4cdc9463cdca3..42c65df5bf4f8 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/RankFeatureFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/RankFeatureFieldMapper.java @@ -164,12 +164,12 @@ public Query existsQuery(QueryShardContext context) { @Override public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName) { - throw new UnsupportedOperationException("[rank_feature] fields do not support sorting, scripting or aggregating"); + throw new IllegalArgumentException("[rank_feature] fields do not support sorting, scripting or aggregating"); } @Override public Query termQuery(Object value, QueryShardContext context) { - throw new UnsupportedOperationException("Queries on [rank_feature] fields are not supported"); + throw new IllegalArgumentException("Queries on [rank_feature] fields are not supported"); } } diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/RankFeaturesFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/RankFeaturesFieldMapper.java index 80b6fe4bc3442..e0b08fffbb074 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/RankFeaturesFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/RankFeaturesFieldMapper.java @@ -104,17 +104,17 @@ public String typeName() { @Override public Query existsQuery(QueryShardContext context) { - throw new UnsupportedOperationException("[rank_features] fields do not support [exists] queries"); + throw new IllegalArgumentException("[rank_features] fields do not support [exists] queries"); } @Override public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName) { - throw new UnsupportedOperationException("[rank_features] fields do not support sorting, scripting or aggregating"); + throw new IllegalArgumentException("[rank_features] fields do not support sorting, scripting or aggregating"); } @Override public Query termQuery(Object value, QueryShardContext context) { - throw new UnsupportedOperationException("Queries on [rank_features] fields are not supported"); + throw new IllegalArgumentException("Queries on [rank_features] fields are not supported"); } } diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeatureFieldTypeTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeatureFieldTypeTests.java index e901be8688fcd..08329a0a180d3 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeatureFieldTypeTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeatureFieldTypeTests.java @@ -43,4 +43,9 @@ public void normalizeOther(MappedFieldType other) { } }); } + + public void testIsAggregatable() { + MappedFieldType fieldType = createDefaultFieldType(); + assertFalse(fieldType.isAggregatable()); + } } diff 
--git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeaturesFieldTypeTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeaturesFieldTypeTests.java index 21a60b66f7683..4be603e0cd241 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeaturesFieldTypeTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeaturesFieldTypeTests.java @@ -26,4 +26,8 @@ protected MappedFieldType createDefaultFieldType() { return new RankFeaturesFieldMapper.RankFeaturesFieldType(); } + public void testIsAggregatable() { + MappedFieldType fieldType = createDefaultFieldType(); + assertFalse(fieldType.isAggregatable()); + } } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalAction.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalAction.java index 07de8c8a22cad..68b0b414ee051 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalAction.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalAction.java @@ -19,22 +19,17 @@ package org.elasticsearch.index.rankeval; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * ActionType for explaining evaluating search ranking results. */ -public class RankEvalAction extends StreamableResponseActionType<RankEvalResponse> { +public class RankEvalAction extends ActionType<RankEvalResponse> { public static final RankEvalAction INSTANCE = new RankEvalAction(); public static final String NAME = "indices:data/read/rank_eval"; private RankEvalAction() { - super(NAME); - } - - @Override - public RankEvalResponse newResponse() { - return new RankEvalResponse(); + super(NAME, RankEvalResponse::new); } } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java index f02ce8fe23496..7f5aab8113dc5 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java @@ -48,7 +48,7 @@ public RankEvalRequest(RankEvalSpec rankingEvaluationSpec, String[] indices) { } RankEvalRequest(StreamInput in) throws IOException { - super.readFrom(in); + super(in); rankingEvaluationSpec = new RankEvalSpec(in); indices = in.readStringArray(); indicesOptions = IndicesOptions.readIndicesOptions(in); @@ -111,11 +111,6 @@ public void indicesOptions(IndicesOptions indicesOptions) { this.indicesOptions = Objects.requireNonNull(indicesOptions, "indicesOptions must not be null"); } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out);
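RankEvalResponse below gets the same treatment, and it shows the map-of-results idiom: a vInt size prefix followed by key/value pairs, which the reading side must consume in the identical order. Sketched out here for the failures map; this mirrors the constructor and writeTo in the diff, with the surrounding class elided:

    // write side
    out.writeVInt(failures.size());
    for (Map.Entry<String, Exception> entry : failures.entrySet()) {
        out.writeString(entry.getKey());
        out.writeException(entry.getValue());
    }

    // read side
    int failuresSize = in.readVInt();
    Map<String, Exception> failures = new HashMap<>(failuresSize);
    for (int i = 0; i < failuresSize; i++) {
        failures.put(in.readString(), in.readException());
    }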
diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalResponse.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalResponse.java index 60c2f474d7d5b..74bea1719e8e3 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalResponse.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalResponse.java @@ -61,8 +61,22 @@ public RankEvalResponse(double metricScore, Map<String, EvalQueryQuality> partia this.failures = new HashMap<>(failures); } - RankEvalResponse() { - // only used in RankEvalAction#newResponse() + RankEvalResponse(StreamInput in) throws IOException { + super(in); + this.metricScore = in.readDouble(); + int partialResultSize = in.readVInt(); + this.details = new HashMap<>(partialResultSize); + for (int i = 0; i < partialResultSize; i++) { + String queryId = in.readString(); + EvalQueryQuality partial = new EvalQueryQuality(in); + this.details.put(queryId, partial); + } + int failuresSize = in.readVInt(); + this.failures = new HashMap<>(failuresSize); + for (int i = 0; i < failuresSize; i++) { + String queryId = in.readString(); + this.failures.put(queryId, in.readException()); + } } public double getMetricScore() { @@ -97,25 +111,6 @@ public void writeTo(StreamOutput out) throws IOException { } } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - this.metricScore = in.readDouble(); - int partialResultSize = in.readVInt(); - this.details = new HashMap<>(partialResultSize); - for (int i = 0; i < partialResultSize; i++) { - String queryId = in.readString(); - EvalQueryQuality partial = new EvalQueryQuality(in); - this.details.put(queryId, partial); - } - int failuresSize = in.readVInt(); - this.failures = new HashMap<>(failuresSize); - for (int i = 0; i < failuresSize; i++) { - String queryId = in.readString(); - this.failures.put(queryId, in.readException()); - } - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java index d92fc4504ffc8..649db936d4fbb 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java @@ -103,8 +103,7 @@ public void testSerialization() throws IOException { try (BytesStreamOutput output = new BytesStreamOutput()) { randomResponse.writeTo(output); try (StreamInput in = output.bytes().streamInput()) { - RankEvalResponse deserializedResponse = new RankEvalResponse(); - deserializedResponse.readFrom(in); + RankEvalResponse deserializedResponse = new RankEvalResponse(in); assertEquals(randomResponse.getMetricScore(), deserializedResponse.getMetricScore(), Double.MIN_VALUE); assertEquals(randomResponse.getPartialResults(), deserializedResponse.getPartialResults()); assertEquals(randomResponse.getFailures().keySet(), deserializedResponse.getFailures().keySet()); diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RethrottleAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RethrottleAction.java index cf04d6d856ddb..4d1c2e4149373 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RethrottleAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RethrottleAction.java @@ -21,18 +21,12 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; -import org.elasticsearch.common.io.stream.Writeable; public class RethrottleAction extends ActionType<ListTasksResponse> { public static final RethrottleAction INSTANCE = new RethrottleAction(); public static final String NAME = "cluster:admin/reindex/rethrottle"; private RethrottleAction() { - super(NAME); - } - - @Override - public Writeable.Reader<ListTasksResponse> getResponseReader() { - return 
ListTasksResponse::new; + super(NAME, ListTasksResponse::new); } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java index 938ff47d60485..8ef6896f0eaeb 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java @@ -885,8 +885,8 @@ void doExecute(ActionType action, Request request, ActionListener otherTransport.start()); + BindHttpException bindHttpException = expectThrows(BindHttpException.class, otherTransport::start); assertEquals("Failed to bind to [" + remoteAddress.getPort() + "]", bindHttpException.getMessage()); } } @@ -260,6 +263,65 @@ public void dispatchBadRequest(final RestRequest request, assertThat(causeReference.get(), instanceOf(TooLongFrameException.class)); } + public void testCorsRequest() throws InterruptedException { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + + @Override + public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { + throw new AssertionError(); + } + + @Override + public void dispatchBadRequest(final RestRequest request, + final RestChannel channel, + final ThreadContext threadContext, + final Throwable cause) { + throw new AssertionError(); + } + + }; + + final Settings settings = Settings.builder() + .put(SETTING_CORS_ENABLED.getKey(), true) + .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), "elastic.co").build(); + + try (Netty4HttpServerTransport transport = + new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry(), dispatcher)) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + + // Test pre-flight request + try (Netty4HttpClient client = new Netty4HttpClient()) { + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.OPTIONS, "/"); + request.headers().add(CorsHandler.ORIGIN, "elastic.co"); + request.headers().add(CorsHandler.ACCESS_CONTROL_REQUEST_METHOD, "POST"); + + final FullHttpResponse response = client.post(remoteAddress.address(), request); + try { + assertThat(response.status(), equalTo(HttpResponseStatus.OK)); + assertThat(response.headers().get(CorsHandler.ACCESS_CONTROL_ALLOW_ORIGIN), equalTo("elastic.co")); + assertThat(response.headers().get(CorsHandler.VARY), equalTo(CorsHandler.ORIGIN)); + assertTrue(response.headers().contains(CorsHandler.DATE)); + } finally { + response.release(); + } + } + + // Test short-circuited request + try (Netty4HttpClient client = new Netty4HttpClient()) { + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); + request.headers().add(CorsHandler.ORIGIN, "elastic2.co"); + + final FullHttpResponse response = client.post(remoteAddress.address(), request); + try { + assertThat(response.status(), equalTo(HttpResponseStatus.FORBIDDEN)); + } finally { + response.release(); + } + } + } + } + public void testReadTimeout() throws Exception { final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java 
b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java index 71585ea7a4e8e..3e09eead65674 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java @@ -30,7 +30,9 @@ import org.elasticsearch.transport.TransportLogger; @ESIntegTestCase.ClusterScope(numDataNodes = 2) -@TestLogging(value = "org.elasticsearch.transport.netty4.ESLoggingHandler:trace,org.elasticsearch.transport.TransportLogger:trace") +@TestLogging( + value = "org.elasticsearch.transport.netty4.ESLoggingHandler:trace,org.elasticsearch.transport.TransportLogger:trace", + reason = "to ensure we log network events on TRACE level") public class ESLoggingHandlerIT extends ESNetty4IntegTestCase { private MockLogAppender appender; diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java index 10c91b4e8d7da..0b210995795bf 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java @@ -29,16 +29,11 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; -import org.elasticsearch.node.Node; -import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.AbstractSimpleTransportTestCase; -import org.elasticsearch.transport.BindTransportException; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.ConnectionProfile; import org.elasticsearch.transport.TcpChannel; import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportSettings; import java.net.InetAddress; import java.net.UnknownHostException; @@ -50,10 +45,10 @@ public class SimpleNetty4TransportTests extends AbstractSimpleTransportTestCase { - public static MockTransportService nettyFromThreadPool(Settings settings, ThreadPool threadPool, final Version version, - ClusterSettings clusterSettings, boolean doHandshake) { + @Override + protected Transport build(Settings settings, final Version version, ClusterSettings clusterSettings, boolean doHandshake) { NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); - Transport transport = new Netty4Transport(settings, version, threadPool, new NetworkService(Collections.emptyList()), + return new Netty4Transport(settings, version, threadPool, new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, namedWriteableRegistry, new NoneCircuitBreakerService()) { @Override @@ -66,18 +61,6 @@ public void executeHandshake(DiscoveryNode node, TcpChannel channel, ConnectionP } } }; - MockTransportService mockTransportService = - MockTransportService.createNewService(settings, transport, version, threadPool, clusterSettings, Collections.emptySet()); - mockTransportService.start(); - return mockTransportService; - } - - @Override - protected MockTransportService build(Settings settings, Version version, ClusterSettings clusterSettings, boolean doHandshake) { - 
settings = Settings.builder().put(settings).put(TransportSettings.PORT.getKey(), "0").build(); - MockTransportService transportService = nettyFromThreadPool(settings, threadPool, version, clusterSettings, doHandshake); - transportService.start(); - return transportService; } public void testConnectException() throws UnknownHostException { @@ -91,27 +74,4 @@ public void testConnectException() throws UnknownHostException { } } - public void testBindUnavailableAddress() { - // this is on a lower level since it needs access to the TransportService before it's started - int port = serviceA.boundAddress().publishAddress().getPort(); - Settings settings = Settings.builder() - .put(Node.NODE_NAME_SETTING.getKey(), "foobar") - .put(TransportSettings.TRACE_LOG_INCLUDE_SETTING.getKey(), "") - .put(TransportSettings.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING") - .put(TransportSettings.PORT.getKey(), port) - .build(); - ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - BindTransportException bindTransportException = expectThrows(BindTransportException.class, () -> { - MockTransportService transportService = - nettyFromThreadPool(settings, threadPool, Version.CURRENT, clusterSettings, true); - try { - transportService.start(); - } finally { - transportService.stop(); - transportService.close(); - } - }); - assertEquals("Failed to bind to ["+ port + "]", bindTransportException.getMessage()); - } - } diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.1.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.1.0.jar.sha1 deleted file mode 100644 index e4657681667f1..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d61364290eb1c28696e62b7df3a7d041d3be2fa5 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.2.0-snapshot-6413aae226.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.2.0-snapshot-6413aae226.jar.sha1 new file mode 100644 index 0000000000000..e072bc01faf90 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.2.0-snapshot-6413aae226.jar.sha1 @@ -0,0 +1 @@ +da0465f77ffacb36672dcd6075319e02dbe76673 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.1.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.1.0.jar.sha1 deleted file mode 100644 index fff37598a0861..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7f78b18890a0a0e74a8249806a6cfcabd2fae304 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.2.0-snapshot-6413aae226.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.2.0-snapshot-6413aae226.jar.sha1 new file mode 100644 index 0000000000000..4308cf1e5e651 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.2.0-snapshot-6413aae226.jar.sha1 @@ -0,0 +1 @@ +cd4d0398da5d78187cb636f19b4b81d05e238cd4 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.1.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.1.0.jar.sha1 deleted file mode 100644 index 47b0c633fdc79..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bfc6b5d67a792aa23ee134fe93307696aad94223 \ No newline at end of file diff --git 
a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.2.0-snapshot-6413aae226.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.2.0-snapshot-6413aae226.jar.sha1 new file mode 100644 index 0000000000000..a97c558f16c45 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.2.0-snapshot-6413aae226.jar.sha1 @@ -0,0 +1 @@ +740e2a96d58fdf2299e5e9c3e156c33b1b2b7d60 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.1.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.1.0.jar.sha1 deleted file mode 100644 index d24096b883fc9..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6fac1ff799b86f872b67e7fad55120d338daa86f \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.2.0-snapshot-6413aae226.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.2.0-snapshot-6413aae226.jar.sha1 new file mode 100644 index 0000000000000..7949cdcb19e7e --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.2.0-snapshot-6413aae226.jar.sha1 @@ -0,0 +1 @@ +4654bbbfdd81356a07f382bb0f8cadf6c6ba81f6 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.1.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.1.0.jar.sha1 deleted file mode 100644 index 9ed51a53f6226..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -72941af5e1bfb012aec04dd518a2deb43402702c \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.2.0-snapshot-6413aae226.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.2.0-snapshot-6413aae226.jar.sha1 new file mode 100644 index 0000000000000..135954405a9f8 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.2.0-snapshot-6413aae226.jar.sha1 @@ -0,0 +1 @@ +d5dc5048018786e5b0719263533e74f4ba144ed3 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.1.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.1.0.jar.sha1 deleted file mode 100644 index 190a7031928b8..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0ac885595cfdc0267d7d9cb843c22dabf7215ff0 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.2.0-snapshot-6413aae226.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.2.0-snapshot-6413aae226.jar.sha1 new file mode 100644 index 0000000000000..fdafa7fe20e71 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.2.0-snapshot-6413aae226.jar.sha1 @@ -0,0 +1 @@ +8c541cc37972991a56153474679357a262c2026c \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.1.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.1.0.jar.sha1 deleted file mode 100644 index 7f2d4c5e8647e..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e260cff7f48e350e1ec037dec1c260ce05ddb53e \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.2.0-snapshot-6413aae226.jar.sha1 
b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.2.0-snapshot-6413aae226.jar.sha1 new file mode 100644 index 0000000000000..beb30ff476a6e --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.2.0-snapshot-6413aae226.jar.sha1 @@ -0,0 +1 @@ +4ac60095d79c31625baa58f24ae5eec4ef23c9a4 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java index c5b310c14817a..e723520f5c8b5 100644 --- a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java +++ b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java @@ -280,7 +280,7 @@ private static String getProtocol() { } else { JavaVersion full = AccessController.doPrivileged( - (PrivilegedAction) () -> JavaVersion.parse(System.getProperty("java.specification.version"))); + (PrivilegedAction) () -> JavaVersion.parse(System.getProperty("java.version"))); if (full.compareTo(JavaVersion.parse("12.0.1")) < 0) { return "TLSv1.2"; } diff --git a/plugins/examples/painless-whitelist/build.gradle b/plugins/examples/painless-whitelist/build.gradle index 738a3be86afab..6b1a78e352f58 100644 --- a/plugins/examples/painless-whitelist/build.gradle +++ b/plugins/examples/painless-whitelist/build.gradle @@ -33,7 +33,7 @@ dependencies { } testClusters.integTest { - distribution = 'oss' + testDistribution = 'oss' } test.enabled = false diff --git a/plugins/examples/rescore/src/main/java/org/elasticsearch/example/rescore/ExampleRescoreBuilder.java b/plugins/examples/rescore/src/main/java/org/elasticsearch/example/rescore/ExampleRescoreBuilder.java index 7f4855f8880b7..14f4d4f8ba326 100644 --- a/plugins/examples/rescore/src/main/java/org/elasticsearch/example/rescore/ExampleRescoreBuilder.java +++ b/plugins/examples/rescore/src/main/java/org/elasticsearch/example/rescore/ExampleRescoreBuilder.java @@ -189,7 +189,7 @@ public TopDocs rescore(TopDocs topDocs, IndexSearcher searcher, RescoreContext r } data = ((AtomicNumericFieldData) fd).getDoubleValues(); } - if (false == data.advanceExact(topDocs.scoreDocs[i].doc)) { + if (false == data.advanceExact(topDocs.scoreDocs[i].doc - leaf.docBase)) { throw new IllegalArgumentException("document [" + topDocs.scoreDocs[i].doc + "] does not have the field [" + context.factorField.getFieldName() + "]"); } diff --git a/plugins/examples/security-authorization-engine/build.gradle b/plugins/examples/security-authorization-engine/build.gradle index 787cc230eeb18..db66051677f51 100644 --- a/plugins/examples/security-authorization-engine/build.gradle +++ b/plugins/examples/security-authorization-engine/build.gradle @@ -32,7 +32,7 @@ testClusters.integTest { // This is important, so that all the modules are available too. // There are index templates that use token filters that are in analysis-module and // processors are being used that are in ingest-common module. 
- distribution = 'default' + testDistribution = 'DEFAULT' user role: 'custom_superuser' } diff --git a/plugins/repository-azure/qa/microsoft-azure-storage/build.gradle b/plugins/repository-azure/qa/microsoft-azure-storage/build.gradle index 5f96fd9dc305b..ddde107413c15 100644 --- a/plugins/repository-azure/qa/microsoft-azure-storage/build.gradle +++ b/plugins/repository-azure/qa/microsoft-azure-storage/build.gradle @@ -70,11 +70,9 @@ testClusters.integTest { plugin file(project(':plugins:repository-azure').bundlePlugin.archiveFile) keystore 'azure.client.integration_test.account', azureAccount if (azureKey != null && azureKey.isEmpty() == false) { - println "Using access key in external service tests." keystore 'azure.client.integration_test.key', azureKey } if (azureSasToken != null && azureSasToken.isEmpty() == false) { - println "Using SAS token in external service tests." keystore 'azure.client.integration_test.sas_token', azureSasToken } @@ -86,7 +84,5 @@ testClusters.integTest { { "ignored;DefaultEndpointsProtocol=http;BlobEndpoint=http://${azureStorageFixture.addressAndPort }" }, IGNORE_VALUE String firstPartOfSeed = project.rootProject.testSeed.tokenize(':').get(0) setting 'thread_pool.repository_azure.max', (Math.abs(Long.parseUnsignedLong(firstPartOfSeed, 16) % 10) + 1).toString(), System.getProperty('ignore.tests.seed') == null ? DEFAULT : IGNORE_VALUE - } else { - println "Using an external service to test the repository-azure plugin" } } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java index 12113542dee44..15d1b37ecf817 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java @@ -57,8 +57,7 @@ public class AzureBlobContainer extends AbstractBlobContainer { this.threadPool = threadPool; } - @Override - public boolean blobExists(String blobName) { + private boolean blobExists(String blobName) { logger.trace("blobExists({})", blobName); try { return blobStore.blobExists(buildKey(blobName)); diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java index 75d4ad92fbf8e..4657ece3c8a2f 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java @@ -22,7 +22,6 @@ import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; -import org.elasticsearch.common.blobstore.BlobStoreException; import org.elasticsearch.common.blobstore.support.AbstractBlobContainer; import java.io.IOException; @@ -42,15 +41,6 @@ class GoogleCloudStorageBlobContainer extends AbstractBlobContainer { this.path = path.buildAsString(); } - @Override - public boolean blobExists(String blobName) { - try { - return blobStore.blobExists(buildKey(blobName)); - } catch (Exception e) { - throw new BlobStoreException("Failed to check if blob [" + blobName + "] exists", e); - } - } - @Override public Map listBlobs() throws IOException { return 
blobStore.listBlobs(path); diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java index 2477fd2962bde..c3fd4848a0c3c 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java @@ -163,18 +163,6 @@ Map listChildren(BlobPath path) throws IOException { return mapBuilder.immutableMap(); } - /** - * Returns true if the blob exists in the specific bucket - * - * @param blobName name of the blob - * @return true iff the blob exists - */ - boolean blobExists(String blobName) throws IOException { - final BlobId blobId = BlobId.of(bucketName, blobName); - final Blob blob = SocketAccess.doPrivilegedIOException(() -> client().get(blobId)); - return blob != null; - } - /** * Returns an {@link java.io.InputStream} for the given blob name * diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 3060867b01b08..0b0c53f417db5 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -91,13 +91,13 @@ for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture', dependsOn project.configurations.hdfsFixture, project(':test:fixtures:krb5kdc-fixture').tasks.postProcessFixture executable = new File(project.runtimeJavaHome, 'bin/java') env 'CLASSPATH', "${ -> project.configurations.hdfsFixture.asPath }" - onlyIf { project(':test:fixtures:krb5kdc-fixture').buildFixture.enabled } + maxWaitInSeconds 60 + onlyIf { project(':test:fixtures:krb5kdc-fixture').buildFixture.enabled && project.inFipsJvm == false } waitCondition = { fixture, ant -> // the hdfs.MiniHDFS fixture writes the ports file when // it's ready, so we can just wait for the file to exist return fixture.portsFile.exists() } - final List miniHDFSArgs = [] // If it's a secure fixture, then depend on Kerberos Fixture and principals + add the krb5conf to the JVM options @@ -125,7 +125,7 @@ for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture', } } -Set disabledIntegTestTaskNames = ['integTestSecure', 'integTestSecureHa'] +Set disabledIntegTestTaskNames = [] for (String integTestTaskName : ['integTestHa', 'integTestSecure', 'integTestSecureHa']) { task "${integTestTaskName}"(type: RestIntegTestTask) { @@ -136,10 +136,36 @@ for (String integTestTaskName : ['integTestHa', 'integTestSecure', 'integTestSec enabled = false; } + if (integTestTaskName.contains("Secure")) { + if (integTestTaskName.contains("Ha")) { + dependsOn secureHaHdfsFixture + } else { + dependsOn secureHdfsFixture + } + } + runner { + onlyIf { project.inFipsJvm == false } + if (integTestTaskName.contains("Ha")) { + if (integTestTaskName.contains("Secure")) { + Path path = buildDir.toPath() + .resolve("fixtures") + .resolve("secureHaHdfsFixture") + .resolve("ports") + nonInputProperties.systemProperty "test.hdfs-fixture.ports", path + classpath += files(path) + } else { + Path path = buildDir.toPath() + .resolve("fixtures") + .resolve("haHdfsFixture") + .resolve("ports") + nonInputProperties.systemProperty "test.hdfs-fixture.ports", path + classpath += files(path) + } + } + if (integTestTaskName.contains("Secure")) { if (disabledIntegTestTaskNames.contains(integTestTaskName) == false) { - dependsOn secureHdfsFixture 
nonInputProperties.systemProperty "test.krb5.principal.es", "elasticsearch@${realm}" nonInputProperties.systemProperty "test.krb5.principal.hdfs", "hdfs/hdfs.build.elastic.co@${realm}" jvmArgs "-Djava.security.krb5.conf=${krb5conf}" @@ -164,7 +190,6 @@ for (String integTestTaskName : ['integTestHa', 'integTestSecure', 'integTestSec } } - // Determine HDFS Fixture compatibility for the current build environment. boolean fixtureSupported = false if (Os.isFamily(Os.FAMILY_WINDOWS)) { @@ -193,6 +218,7 @@ if (legalPath == false) { // Always ignore HA integration tests in the normal integration test runner, they are included below as // part of their own HA-specific integration test tasks. integTest.runner { + onlyIf { project.inFipsJvm == false } exclude('**/Ha*TestSuiteIT.class') } diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java index b050645f9952c..e4c9af4d6c70f 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java @@ -58,15 +58,6 @@ final class HdfsBlobContainer extends AbstractBlobContainer { this.bufferSize = bufferSize; } - @Override - public boolean blobExists(String blobName) { - try { - return store.execute(fileContext -> fileContext.util().exists(new Path(path, blobName))); - } catch (Exception e) { - return false; - } - } - @Override public void deleteBlob(String blobName) throws IOException { try { diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java index 0248576b57384..e29cd14befdb7 100644 --- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java @@ -19,16 +19,6 @@ package org.elasticsearch.repositories.hdfs; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.nio.file.Files; -import java.nio.file.Path; -import java.security.AccessController; -import java.security.PrivilegedActionException; -import java.security.PrivilegedExceptionAction; -import java.util.ArrayList; -import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ha.BadFencingConfigurationException; import org.apache.hadoop.ha.HAServiceProtocol; @@ -46,6 +36,16 @@ import org.elasticsearch.test.rest.ESRestTestCase; import org.junit.Assert; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.AccessController; +import java.security.PrivilegedActionException; +import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; +import java.util.List; + /** * Integration test that runs against an HA-Enabled HDFS instance */ @@ -57,13 +57,24 @@ public void testHAFailoverWithRepository() throws Exception { String esKerberosPrincipal = System.getProperty("test.krb5.principal.es"); String hdfsKerberosPrincipal = System.getProperty("test.krb5.principal.hdfs"); String kerberosKeytabLocation = System.getProperty("test.krb5.keytab.hdfs"); + String ports = System.getProperty("test.hdfs-fixture.ports"); + String nn1Port = 
"10001"; + String nn2Port = "10002"; + if (ports.length() > 0) { + final Path path = PathUtils.get(ports); + final List lines = AccessController.doPrivileged((PrivilegedExceptionAction>) () -> { + return Files.readAllLines(path); + }); + nn1Port = lines.get(0); + nn2Port = lines.get(1); + } boolean securityEnabled = hdfsKerberosPrincipal != null; Configuration hdfsConfiguration = new Configuration(); hdfsConfiguration.set("dfs.nameservices", "ha-hdfs"); hdfsConfiguration.set("dfs.ha.namenodes.ha-hdfs", "nn1,nn2"); - hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn1", "localhost:10001"); - hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn2", "localhost:10002"); + hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn1", "localhost:" + nn1Port); + hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn2", "localhost:" + nn2Port); hdfsConfiguration.set( "dfs.client.failover.proxy.provider.ha-hdfs", "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider" @@ -110,8 +121,8 @@ public void testHAFailoverWithRepository() throws Exception { securityCredentials(securityEnabled, esKerberosPrincipal) + "\"conf.dfs.nameservices\": \"ha-hdfs\"," + "\"conf.dfs.ha.namenodes.ha-hdfs\": \"nn1,nn2\"," + - "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn1\": \"localhost:10001\"," + - "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn2\": \"localhost:10002\"," + + "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn1\": \"localhost:"+nn1Port+"\"," + + "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn2\": \"localhost:"+nn2Port+"\"," + "\"conf.dfs.client.failover.proxy.provider.ha-hdfs\": " + "\"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider\"" + "}" + diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java index ba00862e93848..c0b0b7a307d49 100644 --- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.repositories.ESBlobStoreContainerTestCase; +import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil; import javax.security.auth.Subject; import java.io.IOException; @@ -137,6 +138,6 @@ public void testReadOnly() throws Exception { byte[] data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16))); writeBlob(container, "foo", new BytesArray(data), randomBoolean()); assertArrayEquals(readBlobFully(container, "foo", data.length), data); - assertTrue(container.blobExists("foo")); + assertTrue(BlobStoreTestUtil.blobExists(container, "foo")); } } diff --git a/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot_get.yml b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot_get.yml index 20d988884113f..2c4fcc338ab07 100644 --- a/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot_get.yml +++ b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot_get.yml @@ -48,8 +48,8 @@ repository: test_snapshot_get_repository snapshot: test_snapshot_get - - length: { snapshots: 1 } - - match: { 
snapshots.0.snapshot : test_snapshot_get } + - length: { responses.0.snapshots: 1 } + - match: { responses.0.snapshots.0.snapshot : test_snapshot_get } # List snapshot info - do: @@ -57,8 +57,8 @@ repository: test_snapshot_get_repository snapshot: "*" - - length: { snapshots: 1 } - - match: { snapshots.0.snapshot : test_snapshot_get } + - length: { responses.0.snapshots: 1 } + - match: { responses.0.snapshots.0.snapshot : test_snapshot_get } # Remove our snapshot - do: diff --git a/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot_readonly.yml b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot_readonly.yml index 8c4c0347a156a..c31749072a17b 100644 --- a/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot_readonly.yml +++ b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot_readonly.yml @@ -23,7 +23,7 @@ repository: test_snapshot_repository_ro snapshot: "_all" - - length: { snapshots: 1 } + - length: { responses.0.snapshots: 1 } # Remove our repository - do: diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 0807d44c03638..af10aa079ee7c 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -101,6 +101,8 @@ String s3EC2BasePath = System.getenv("amazon_s3_base_path_ec2") String s3ECSBucket = System.getenv("amazon_s3_bucket_ecs") String s3ECSBasePath = System.getenv("amazon_s3_base_path_ecs") +boolean s3DisableChunkedEncoding = (new Random(Long.parseUnsignedLong(project.rootProject.testSeed.tokenize(':').get(0), 16))).nextBoolean() + // If all these variables are missing then we are testing against the internal fixture instead, which has the following // credentials hard-coded in. 
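For illustration only, a self-contained Java sketch of the seed-derivation technique used for s3DisableChunkedEncoding above; the seed literal and class name are hypothetical stand-ins for Gradle's project.rootProject.testSeed:

import java.util.Random;

public class SeedDerivedFlag {
    public static void main(String[] args) {
        // Hypothetical seed in the "primary:secondary" form the test-seed plugin produces.
        String testSeed = "DEADBEEF12345678:1A2B3C4D";
        String firstPartOfSeed = testSeed.split(":")[0];
        // Hex-parse the primary seed and draw a single boolean: the flag varies across
        // CI runs but is reproducible whenever the same seed is replayed.
        boolean disableChunkedEncoding = new Random(Long.parseUnsignedLong(firstPartOfSeed, 16)).nextBoolean();
        System.out.println("disable_chunked_encoding=" + disableChunkedEncoding);
    }
}

Deriving the flag from the test seed keeps the choice random across CI runs yet exactly reproducible when a failing run is replayed with the same -Dtests.seed.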
@@ -167,8 +169,6 @@ if (useFixture) { 'http://127.0.0.1:' + minioPort } - File minioAddressFile = new File(project.buildDir, 'generated-resources/s3Fixture.address') - normalization { runtimeClasspath { // ignore generated address file for the purposes of build avoidance @@ -178,12 +178,7 @@ if (useFixture) { thirdPartyTest { dependsOn tasks.bundlePlugin, tasks.postProcessFixture - outputs.file(minioAddressFile) - doFirst { - file(minioAddressFile).text = "${ -> minioAddress.call() }" - } - // TODO: this could be a nonInputProperties.systemProperty so we don't need a file - systemProperty 'test.s3.endpoint', minioAddressFile.name + nonInputProperties.systemProperty 'test.s3.endpoint', "${ -> minioAddress.call() }" } task integTestMinio(type: RestIntegTestTask) { @@ -236,7 +231,8 @@ task s3FixtureProperties { "s3Fixture.temporary_key" : s3TemporaryAccessKey, "s3Fixture.temporary_session_token": s3TemporarySessionToken, "s3Fixture.ec2_bucket_name" : s3EC2Bucket, - "s3Fixture.ecs_bucket_name" : s3ECSBucket + "s3Fixture.ecs_bucket_name" : s3ECSBucket, + "s3Fixture.disableChunkedEncoding" : s3DisableChunkedEncoding ] doLast { @@ -264,7 +260,8 @@ processTestResources { 'ec2_bucket': s3EC2Bucket, 'ec2_base_path': s3EC2BasePath, 'ecs_bucket': s3ECSBucket, - 'ecs_base_path': s3ECSBasePath + 'ecs_base_path': s3ECSBasePath, + 'disable_chunked_encoding': s3DisableChunkedEncoding, ] inputs.properties(expansions) MavenFilteringHack.filter(it, expansions) diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java index c2e1f3de7f0dd..9aab721f3c407 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java @@ -42,11 +42,9 @@ import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; -import org.elasticsearch.common.blobstore.BlobStoreException; import org.elasticsearch.common.blobstore.support.AbstractBlobContainer; import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.util.Maps; import java.io.IOException; import java.io.InputStream; @@ -56,9 +54,9 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.function.Function; import java.util.stream.Collectors; -import static java.util.Map.entry; import static org.elasticsearch.repositories.s3.S3Repository.MAX_FILE_SIZE; import static org.elasticsearch.repositories.s3.S3Repository.MAX_FILE_SIZE_USING_MULTIPART; import static org.elasticsearch.repositories.s3.S3Repository.MIN_PART_SIZE_USING_MULTIPART; @@ -80,15 +78,6 @@ class S3BlobContainer extends AbstractBlobContainer { this.keyPath = path.buildAsString(); } - @Override - public boolean blobExists(String blobName) { - try (AmazonS3Reference clientReference = blobStore.clientReference()) { - return SocketAccess.doPrivileged(() -> clientReference.client().doesObjectExist(blobStore.bucket(), buildKey(blobName))); - } catch (final Exception e) { - throw new BlobStoreException("Failed to check if blob [" + blobName +"] exists", e); - } - } - @Override public InputStream readBlob(String blobName) throws IOException { try (AmazonS3Reference clientReference = blobStore.clientReference()) { @@ -238,36 +227,12 
@@ public void deleteBlobIgnoringIfNotExists(String blobName) throws IOException { @Override public Map<String, BlobMetaData> listBlobsByPrefix(@Nullable String blobNamePrefix) throws IOException { - final var entries = new ArrayList<Map.Entry<String, BlobMetaData>>(); try (AmazonS3Reference clientReference = blobStore.clientReference()) { - ObjectListing prevListing = null; - while (true) { - ObjectListing list; - if (prevListing != null) { - final ObjectListing finalPrevListing = prevListing; - list = SocketAccess.doPrivileged(() -> clientReference.client().listNextBatchOfObjects(finalPrevListing)); - } else { - final ListObjectsRequest listObjectsRequest = new ListObjectsRequest(); - listObjectsRequest.setBucketName(blobStore.bucket()); - listObjectsRequest.setDelimiter("/"); - if (blobNamePrefix != null) { - listObjectsRequest.setPrefix(buildKey(blobNamePrefix)); - } else { - listObjectsRequest.setPrefix(keyPath); - } - list = SocketAccess.doPrivileged(() -> clientReference.client().listObjects(listObjectsRequest)); - } - for (final S3ObjectSummary summary : list.getObjectSummaries()) { - final String name = summary.getKey().substring(keyPath.length()); - entries.add(entry(name, new PlainBlobMetaData(name, summary.getSize()))); - } - if (list.isTruncated()) { - prevListing = list; - } else { - break; - } - } - return Maps.ofEntries(entries); + return executeListing(clientReference, listObjectsRequest(blobNamePrefix == null ? keyPath : buildKey(blobNamePrefix))) + .stream() + .flatMap(listing -> listing.getObjectSummaries().stream()) + .map(summary -> new PlainBlobMetaData(summary.getKey().substring(keyPath.length()), summary.getSize())) + .collect(Collectors.toMap(PlainBlobMetaData::name, Function.identity())); } catch (final AmazonClientException e) { throw new IOException("Exception when listing blobs by prefix [" + blobNamePrefix + "]", e); } @@ -281,49 +246,52 @@ public Map<String, BlobMetaData> listBlobs() throws IOException { @Override public Map<String, BlobContainer> children() throws IOException { try (AmazonS3Reference clientReference = blobStore.clientReference()) { - ObjectListing prevListing = null; - final var entries = new ArrayList<Map.Entry<String, BlobContainer>>(); - while (true) { - ObjectListing list; - if (prevListing != null) { - final ObjectListing finalPrevListing = prevListing; - list = SocketAccess.doPrivileged(() -> clientReference.client().listNextBatchOfObjects(finalPrevListing)); - } else { - final ListObjectsRequest listObjectsRequest = new ListObjectsRequest(); - listObjectsRequest.setBucketName(blobStore.bucket()); - listObjectsRequest.setPrefix(keyPath); - listObjectsRequest.setDelimiter("/"); - list = SocketAccess.doPrivileged(() -> clientReference.client().listObjects(listObjectsRequest)); - } - for (final String summary : list.getCommonPrefixes()) { - final String name = summary.substring(keyPath.length()); - if (name.isEmpty() == false) { - // Stripping the trailing slash off of the common prefix - final String last = name.substring(0, name.length() - 1); - final BlobPath path = path().add(last); - entries.add(entry(last, blobStore.blobContainer(path))); - } - } - assert list.getObjectSummaries().stream().noneMatch(s -> { - for (String commonPrefix : list.getCommonPrefixes()) { - if (s.getKey().substring(keyPath.length()).startsWith(commonPrefix)) { - return true; + return executeListing(clientReference, listObjectsRequest(keyPath)).stream().flatMap(listing -> { + assert listing.getObjectSummaries().stream().noneMatch(s -> { + for (String commonPrefix : listing.getCommonPrefixes()) { + if (s.getKey().substring(keyPath.length()).startsWith(commonPrefix)) { + return true; + } } 
- } - return false; - }) : "Response contained children for listed common prefixes."; - if (list.isTruncated()) { - prevListing = list; - } else { - break; - } - } - return Maps.ofEntries(entries); + return false; + }) : "Response contained children for listed common prefixes."; + return listing.getCommonPrefixes().stream(); + }) + .map(prefix -> prefix.substring(keyPath.length())) + .filter(name -> name.isEmpty() == false) + // Stripping the trailing slash off of the common prefix + .map(name -> name.substring(0, name.length() - 1)) + .collect(Collectors.toMap(Function.identity(), name -> blobStore.blobContainer(path().add(name)))); } catch (final AmazonClientException e) { - throw new IOException("Exception when listing children of [" + path().buildAsString() + ']', e); + throw new IOException("Exception when listing children of [" + path().buildAsString() + ']', e); } } + private static List executeListing(AmazonS3Reference clientReference, ListObjectsRequest listObjectsRequest) { + final List results = new ArrayList<>(); + ObjectListing prevListing = null; + while (true) { + ObjectListing list; + if (prevListing != null) { + final ObjectListing finalPrevListing = prevListing; + list = SocketAccess.doPrivileged(() -> clientReference.client().listNextBatchOfObjects(finalPrevListing)); + } else { + list = SocketAccess.doPrivileged(() -> clientReference.client().listObjects(listObjectsRequest)); + } + results.add(list); + if (list.isTruncated()) { + prevListing = list; + } else { + break; + } + } + return results; + } + + private ListObjectsRequest listObjectsRequest(String keyPath) { + return new ListObjectsRequest().withBucketName(blobStore.bucket()).withPrefix(keyPath).withDelimiter("/"); + } + private String buildKey(String blobName) { return keyPath + blobName; } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java index ae2bd2e905bf6..fee00786a2ab3 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java @@ -99,6 +99,10 @@ final class S3ClientSettings { static final Setting.AffixSetting USE_PATH_STYLE_ACCESS = Setting.affixKeySetting(PREFIX, "path_style_access", key -> Setting.boolSetting(key, false, Property.NodeScope)); + /** Whether chunked encoding should be disabled or not (Default is false). */ + static final Setting.AffixSetting DISABLE_CHUNKED_ENCODING = Setting.affixKeySetting(PREFIX, "disable_chunked_encoding", + key -> Setting.boolSetting(key, false, Property.NodeScope)); + /** Credentials to authenticate with s3. */ final S3BasicCredentials credentials; @@ -134,10 +138,13 @@ final class S3ClientSettings { /** Whether the s3 client should use path style access. */ final boolean pathStyleAccess; + /** Whether chunked encoding should be disabled or not. 
*/ + final boolean disableChunkedEncoding; + private S3ClientSettings(S3BasicCredentials credentials, String endpoint, Protocol protocol, String proxyHost, int proxyPort, String proxyUsername, String proxyPassword, int readTimeoutMillis, int maxRetries, boolean throttleRetries, - boolean pathStyleAccess) { + boolean pathStyleAccess, boolean disableChunkedEncoding) { this.credentials = credentials; this.endpoint = endpoint; this.protocol = protocol; @@ -149,6 +156,7 @@ private S3ClientSettings(S3BasicCredentials credentials, String endpoint, Protoc this.maxRetries = maxRetries; this.throttleRetries = throttleRetries; this.pathStyleAccess = pathStyleAccess; + this.disableChunkedEncoding = disableChunkedEncoding; } /** @@ -172,6 +180,8 @@ S3ClientSettings refine(RepositoryMetaData metadata) { final int newMaxRetries = getRepoSettingOrDefault(MAX_RETRIES_SETTING, normalizedSettings, maxRetries); final boolean newThrottleRetries = getRepoSettingOrDefault(USE_THROTTLE_RETRIES_SETTING, normalizedSettings, throttleRetries); final boolean usePathStyleAccess = getRepoSettingOrDefault(USE_PATH_STYLE_ACCESS, normalizedSettings, pathStyleAccess); + final boolean newDisableChunkedEncoding = getRepoSettingOrDefault( + DISABLE_CHUNKED_ENCODING, normalizedSettings, disableChunkedEncoding); final S3BasicCredentials newCredentials; if (checkDeprecatedCredentials(repoSettings)) { newCredentials = loadDeprecatedCredentials(repoSettings); @@ -180,7 +190,8 @@ S3ClientSettings refine(RepositoryMetaData metadata) { } if (Objects.equals(endpoint, newEndpoint) && protocol == newProtocol && Objects.equals(proxyHost, newProxyHost) && proxyPort == newProxyPort && newReadTimeoutMillis == readTimeoutMillis && maxRetries == newMaxRetries - && newThrottleRetries == throttleRetries && Objects.equals(credentials, newCredentials)) { + && newThrottleRetries == throttleRetries && Objects.equals(credentials, newCredentials) + && newDisableChunkedEncoding == disableChunkedEncoding) { return this; } return new S3ClientSettings( @@ -194,7 +205,8 @@ S3ClientSettings refine(RepositoryMetaData metadata) { newReadTimeoutMillis, newMaxRetries, newThrottleRetries, - usePathStyleAccess + usePathStyleAccess, + newDisableChunkedEncoding ); } @@ -282,7 +294,8 @@ static S3ClientSettings getClientSettings(final Settings settings, final String Math.toIntExact(getConfigValue(settings, clientName, READ_TIMEOUT_SETTING).millis()), getConfigValue(settings, clientName, MAX_RETRIES_SETTING), getConfigValue(settings, clientName, USE_THROTTLE_RETRIES_SETTING), - getConfigValue(settings, clientName, USE_PATH_STYLE_ACCESS) + getConfigValue(settings, clientName, USE_PATH_STYLE_ACCESS), + getConfigValue(settings, clientName, DISABLE_CHUNKED_ENCODING) ); } } @@ -305,13 +318,14 @@ public boolean equals(final Object o) { protocol == that.protocol && Objects.equals(proxyHost, that.proxyHost) && Objects.equals(proxyUsername, that.proxyUsername) && - Objects.equals(proxyPassword, that.proxyPassword); + Objects.equals(proxyPassword, that.proxyPassword) && + Objects.equals(disableChunkedEncoding, that.disableChunkedEncoding); } @Override public int hashCode() { return Objects.hash(credentials, endpoint, protocol, proxyHost, proxyPort, proxyUsername, proxyPassword, - readTimeoutMillis, maxRetries, throttleRetries); + readTimeoutMillis, maxRetries, throttleRetries, disableChunkedEncoding); } private static T getConfigValue(Settings settings, String clientName, diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java 
b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java index 3b232354ddfea..f7ae303a1c2aa 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java @@ -157,6 +157,9 @@ AmazonS3 buildClient(final S3ClientSettings clientSettings) { if (clientSettings.pathStyleAccess) { builder.enablePathStyleAccess(); } + if (clientSettings.disableChunkedEncoding) { + builder.disableChunkedEncoding(); + } return builder.build(); } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java index 51b1d5159edfe..e0434d1e50f1d 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.test.fixture.AbstractHttpFixture; import com.amazonaws.util.DateUtils; +import com.amazonaws.util.IOUtils; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.Streams; @@ -75,6 +76,7 @@ public class AmazonS3Fixture extends AbstractHttpFixture { /** Request handlers for the requests made by the S3 client **/ private final PathTrie handlers; + private final boolean disableChunkedEncoding; /** * Creates a {@link AmazonS3Fixture} */ @@ -92,6 +94,8 @@ private AmazonS3Fixture(final String workingDir, Properties properties) { randomAsciiAlphanumOfLength(random, 10), randomAsciiAlphanumOfLength(random, 10)); this.handlers = defaultHandlers(buckets, ec2Bucket, ecsBucket); + + this.disableChunkedEncoding = Boolean.parseBoolean(prop(properties, "s3Fixture.disableChunkedEncoding")); } private static String nonAuthPath(Request request) { @@ -216,13 +220,16 @@ private PathTrie defaultHandlers(final Map bucke final String destObjectName = objectName(request.getParameters()); - // This is a chunked upload request. We should have the header "Content-Encoding : aws-chunked,gzip" - // to detect it but it seems that the AWS SDK does not follow the S3 guidelines here. - // - // See https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html - // String headerDecodedContentLength = request.getHeader("X-amz-decoded-content-length"); if (headerDecodedContentLength != null) { + if (disableChunkedEncoding) { + return newInternalError(request.getId(), "Something is wrong with this PUT request"); + } + // This is a chunked upload request. We should have the header "Content-Encoding : aws-chunked,gzip" + // to detect it but it seems that the AWS SDK does not follow the S3 guidelines here. 
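+ // (Each chunk in such an upload is framed as "<hex-size>;chunk-signature=<sig>\r\n<data>\r\n",
+ // which is why the decoded-length header, not Content-Length, carries the real payload size.)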
+ // + // See https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html + // int contentLength = Integer.valueOf(headerDecodedContentLength); // Chunked requests have a payload like this: @@ -246,9 +253,18 @@ private PathTrie defaultHandlers(final Map bucke destBucket.objects.put(destObjectName, bytes); return new Response(RestStatus.OK.getStatus(), TEXT_PLAIN_CONTENT_TYPE, EMPTY_BYTE); } - } + } else { + if (disableChunkedEncoding == false) { + return newInternalError(request.getId(), "Something is wrong with this PUT request"); + } + // Read from body directly + try (BufferedInputStream inputStream = new BufferedInputStream(new ByteArrayInputStream(request.getBody()))) { + byte[] bytes = IOUtils.toByteArray(inputStream); - return newInternalError(request.getId(), "Something is wrong with this PUT request"); + destBucket.objects.put(destObjectName, bytes); + return new Response(RestStatus.OK.getStatus(), TEXT_PLAIN_CONTENT_TYPE, EMPTY_BYTE); + } + } }) ); diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java index 312d9649aa375..9f18d1588047f 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java @@ -151,4 +151,11 @@ public void testPathStyleAccessCanBeSet() { assertThat(settings.get("default").pathStyleAccess, is(false)); assertThat(settings.get("other").pathStyleAccess, is(true)); } + + public void testUseChunkedEncodingCanBeSet() { + final Map settings = S3ClientSettings.load( + Settings.builder().put("s3.client.other.disable_chunked_encoding", true).build()); + assertThat(settings.get("default").disableChunkedEncoding, is(false)); + assertThat(settings.get("other").disableChunkedEncoding, is(true)); + } } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java index 7e7ac8d430062..28fd9c72f0868 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java @@ -27,12 +27,10 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.AbstractThirdPartyRepositoryTestCase; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; -import org.elasticsearch.test.StreamsUtils; -import java.io.IOException; import java.util.Collection; -import java.util.concurrent.Executor; import java.util.Map; +import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; import static org.hamcrest.Matchers.blankOrNullString; @@ -63,13 +61,9 @@ protected void createRepository(String repoName) { Settings.Builder settings = Settings.builder() .put("bucket", System.getProperty("test.s3.bucket")) .put("base_path", System.getProperty("test.s3.base", "testpath")); - final String endpointPath = System.getProperty("test.s3.endpoint"); - if (endpointPath != null) { - try { - settings = settings.put("endpoint", StreamsUtils.copyToStringFromClasspath("/" + endpointPath)); - } catch (IOException e) { - throw new AssertionError(e); - } + final String endpoint = System.getProperty("test.s3.endpoint"); + if (endpoint != null) { + settings 
= settings.put("endpoint", endpoint); } AcknowledgedResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo") .setType("s3") diff --git a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml index d319bf8984a97..57b2e42bb2503 100644 --- a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml +++ b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml @@ -15,6 +15,7 @@ setup: base_path: "${permanent_base_path}" canned_acl: private storage_class: standard + disable_chunked_encoding: ${disable_chunked_encoding} # Remove the snapshots, if a previous test failed to delete them. This is # useful for third party tests that runs the test against a real external service. diff --git a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml index 3ad6c3959634b..1dff4d1ae74f1 100644 --- a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml +++ b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml @@ -15,6 +15,7 @@ setup: base_path: "${temporary_base_path}" canned_acl: private storage_class: standard + disable_chunked_encoding: ${disable_chunked_encoding} --- "Snapshot and Restore with repository-s3 using temporary credentials": diff --git a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml index fa1d3fc10fb13..b7c22eccf092e 100644 --- a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml +++ b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml @@ -15,6 +15,7 @@ setup: base_path: "${ec2_base_path}" canned_acl: private storage_class: standard + disable_chunked_encoding: ${disable_chunked_encoding} --- "Snapshot and Restore with repository-s3 using ec2 credentials": diff --git a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml index 99736fb25ff24..ba839d3fb98f2 100644 --- a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml +++ b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml @@ -15,6 +15,7 @@ setup: base_path: "${ecs_base_path}" canned_acl: private storage_class: standard + disable_chunked_encoding: ${disable_chunked_encoding} --- "Snapshot and Restore with repository-s3 using ecs credentials": diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java index 3c3406b0eda45..795cb42b805d5 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java +++ 
b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java @@ -58,7 +58,7 @@ public class HttpReadWriteHandler implements NioChannelHandler { private final TaskScheduler taskScheduler; private final LongSupplier nanoClock; private final long readTimeoutNanos; - private boolean channelRegistered = false; + private boolean channelActive = false; private boolean requestSinceReadTimeoutTrigger = false; private int inFlightRequests = 0; @@ -91,8 +91,8 @@ public HttpReadWriteHandler(NioHttpChannel nioHttpChannel, NioHttpServerTranspor } @Override - public void channelRegistered() { - channelRegistered = true; + public void channelActive() { + channelActive = true; if (readTimeoutNanos > 0) { scheduleReadTimeout(); } @@ -100,7 +100,7 @@ public void channelRegistered() { @Override public int consumeReads(InboundChannelBuffer channelBuffer) { - assert channelRegistered : "channelRegistered should have been called"; + assert channelActive : "channelActive should have been called"; int bytesConsumed = adaptor.read(channelBuffer.sliceAndRetainPagesTo(channelBuffer.getIndex())); Object message; while ((message = adaptor.pollInboundMessage()) != null) { @@ -123,7 +123,7 @@ public WriteOperation createWriteOperation(SocketChannelContext context, Object public List writeToBytes(WriteOperation writeOperation) { assert writeOperation.getObject() instanceof NioHttpResponse : "This channel only supports messages that are of type: " + NioHttpResponse.class + ". Found type: " + writeOperation.getObject().getClass() + "."; - assert channelRegistered : "channelRegistered should have been called"; + assert channelActive : "channelActive should have been called"; --inFlightRequests; assert inFlightRequests >= 0 : "Inflight requests should never drop below zero, found: " + inFlightRequests; adaptor.write(writeOperation); diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java index 9ee66341a487a..4ff62ebb49250 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java @@ -99,7 +99,7 @@ public void setMocks() { CorsHandler.Config corsConfig = CorsHandler.disabled(); handler = new HttpReadWriteHandler(channel, transport, httpHandlingSettings, corsConfig, taskScheduler, System::nanoTime); - handler.channelRegistered(); + handler.channelActive(); } public void testSuccessfulDecodeHttpRequest() throws IOException { @@ -333,7 +333,7 @@ public void testReadTimeout() throws IOException { Iterator timeValues = Arrays.asList(0, 2, 4, 6, 8).iterator(); handler = new HttpReadWriteHandler(channel, transport, httpHandlingSettings, corsConfig, taskScheduler, timeValues::next); - handler.channelRegistered(); + handler.channelActive(); prepareHandlerForResponse(handler); SocketChannelContext context = mock(SocketChannelContext.class); @@ -380,7 +380,7 @@ private FullHttpResponse executeCorsRequest(final Settings settings, final Strin CorsHandler.Config corsConfig = CorsHandler.fromSettings(settings); HttpReadWriteHandler handler = new HttpReadWriteHandler(channel, transport, httpSettings, corsConfig, taskScheduler, System::nanoTime); - handler.channelRegistered(); + handler.channelActive(); prepareHandlerForResponse(handler); DefaultFullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, 
HttpMethod.GET, "/"); if (originValue != null) { diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpClient.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpClient.java index ed55007f3ba61..c430f2b57d325 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpClient.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpClient.java @@ -110,7 +110,7 @@ public Collection get(InetSocketAddress remoteAddress, String. return sendRequests(remoteAddress, requests); } - public final FullHttpResponse post(InetSocketAddress remoteAddress, FullHttpRequest httpRequest) throws InterruptedException { + public final FullHttpResponse send(InetSocketAddress remoteAddress, FullHttpRequest httpRequest) throws InterruptedException { Collection responses = sendRequests(remoteAddress, Collections.singleton(httpRequest)); assert responses.size() == 1 : "expected 1 and only 1 http response"; return responses.iterator().next(); @@ -227,7 +227,7 @@ private HttpClientHandler(NioSocketChannel channel, CountDownLatch latch, Collec } @Override - public void channelRegistered() {} + public void channelActive() {} @Override public WriteOperation createWriteOperation(SocketChannelContext context, Object message, BiConsumer listener) { @@ -271,7 +271,7 @@ public int consumeReads(InboundChannelBuffer channelBuffer) throws IOException { int bytesConsumed = adaptor.read(channelBuffer.sliceAndRetainPagesTo(channelBuffer.getIndex())); Object message; while ((message = adaptor.pollInboundMessage()) != null) { - handleRequest(message); + handleResponse(message); } return bytesConsumed; @@ -286,12 +286,18 @@ public boolean closeNow() { public void close() throws IOException { try { adaptor.close(); + // After closing the pipeline, we must poll to see if any new messages are available. This + // is because HTTP supports a channel being closed as an end of content marker. 
+ Object message; + while ((message = adaptor.pollInboundMessage()) != null) { + handleResponse(message); + } } catch (Exception e) { throw new IOException(e); } } - private void handleRequest(Object message) { + private void handleResponse(Object message) { final FullHttpResponse response = (FullHttpResponse) message; DefaultFullHttpResponse newResponse = new DefaultFullHttpResponse(response.protocolVersion(), response.status(), diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java index bc3208eb29809..0c16cd01aaa90 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java @@ -43,6 +43,7 @@ import org.elasticsearch.common.util.MockPageCacheRecycler; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.http.BindHttpException; +import org.elasticsearch.http.CorsHandler; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.http.NullDispatcher; @@ -66,6 +67,8 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; import static org.elasticsearch.rest.RestStatus.BAD_REQUEST; import static org.elasticsearch.rest.RestStatus.OK; import static org.hamcrest.Matchers.containsString; @@ -159,13 +162,13 @@ public void dispatchBadRequest(RestRequest request, RestChannel channel, ThreadC request.headers().set(HttpHeaderNames.EXPECT, expectation); HttpUtil.setContentLength(request, contentLength); - final FullHttpResponse response = client.post(remoteAddress.address(), request); + final FullHttpResponse response = client.send(remoteAddress.address(), request); try { assertThat(response.status(), equalTo(expectedStatus)); if (expectedStatus.equals(HttpResponseStatus.CONTINUE)) { final FullHttpRequest continuationRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/", Unpooled.EMPTY_BUFFER); - final FullHttpResponse continuationResponse = client.post(remoteAddress.address(), continuationRequest); + final FullHttpResponse continuationResponse = client.send(remoteAddress.address(), continuationRequest); try { assertThat(continuationResponse.status(), is(HttpResponseStatus.OK)); assertThat( @@ -196,6 +199,65 @@ threadPool, xContentRegistry(), new NullDispatcher(), new NioGroupFactory(Settin } } + public void testCorsRequest() throws InterruptedException { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + + @Override + public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { + throw new AssertionError(); + } + + @Override + public void dispatchBadRequest(final RestRequest request, + final RestChannel channel, + final ThreadContext threadContext, + final Throwable cause) { + throw new AssertionError(); + } + + }; + + final Settings settings = Settings.builder() + .put(SETTING_CORS_ENABLED.getKey(), true) + .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), "elastic.co").build(); + + try (NioHttpServerTransport transport = new NioHttpServerTransport(settings, networkService, 
bigArrays, pageRecycler, + threadPool, xContentRegistry(), dispatcher, new NioGroupFactory(settings, logger))) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + + // Test pre-flight request + try (NioHttpClient client = new NioHttpClient()) { + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.OPTIONS, "/"); + request.headers().add(CorsHandler.ORIGIN, "elastic.co"); + request.headers().add(CorsHandler.ACCESS_CONTROL_REQUEST_METHOD, "POST"); + + final FullHttpResponse response = client.send(remoteAddress.address(), request); + try { + assertThat(response.status(), equalTo(HttpResponseStatus.OK)); + assertThat(response.headers().get(CorsHandler.ACCESS_CONTROL_ALLOW_ORIGIN), equalTo("elastic.co")); + assertThat(response.headers().get(CorsHandler.VARY), equalTo(CorsHandler.ORIGIN)); + assertTrue(response.headers().contains(CorsHandler.DATE)); + } finally { + response.release(); + } + } + + // Test short-circuited request + try (NioHttpClient client = new NioHttpClient()) { + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); + request.headers().add(CorsHandler.ORIGIN, "elastic2.co"); + + final FullHttpResponse response = client.send(remoteAddress.address(), request); + try { + assertThat(response.status(), equalTo(HttpResponseStatus.FORBIDDEN)); + } finally { + response.release(); + } + } + } + } + public void testBadRequest() throws InterruptedException { final AtomicReference causeReference = new AtomicReference<>(); final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { @@ -241,7 +303,7 @@ threadPool, xContentRegistry(), dispatcher, new NioGroupFactory(settings, logger final String url = "/" + new String(new byte[maxInitialLineLength], Charset.forName("UTF-8")); final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url); - final FullHttpResponse response = client.post(remoteAddress.address(), request); + final FullHttpResponse response = client.send(remoteAddress.address(), request); try { assertThat(response.status(), equalTo(HttpResponseStatus.BAD_REQUEST)); assertThat( diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportLoggingIT.java b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportLoggingIT.java index fac509a0e868a..4724cfac6942e 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportLoggingIT.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportLoggingIT.java @@ -30,7 +30,7 @@ import org.elasticsearch.transport.TransportLogger; @ESIntegTestCase.ClusterScope(numDataNodes = 2) -@TestLogging(value = "org.elasticsearch.transport.TransportLogger:trace") +@TestLogging(value = "org.elasticsearch.transport.TransportLogger:trace", reason = "to ensure we log network events on TRACE level") public class NioTransportLoggingIT extends NioIntegTestCase { private MockLogAppender appender; @@ -77,4 +77,5 @@ public void testLoggingHandler() throws IllegalAccessException { client().admin().cluster().nodesHotThreads(new NodesHotThreadsRequest()).actionGet(); appender.assertAllExpectationsMatched(); } + } diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java index 
a78e924298a9e..5982831c50036 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java @@ -29,16 +29,11 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.MockPageCacheRecycler; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; -import org.elasticsearch.node.Node; -import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.AbstractSimpleTransportTestCase; -import org.elasticsearch.transport.BindTransportException; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.ConnectionProfile; import org.elasticsearch.transport.TcpChannel; import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportSettings; import java.io.IOException; import java.net.InetAddress; @@ -52,11 +47,11 @@ public class SimpleNioTransportTests extends AbstractSimpleTransportTestCase { - public MockTransportService nioFromThreadPool(Settings settings, ThreadPool threadPool, final Version version, - ClusterSettings clusterSettings, boolean doHandshake) { + @Override + protected Transport build(Settings settings, final Version version, ClusterSettings clusterSettings, boolean doHandshake) { NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); NetworkService networkService = new NetworkService(Collections.emptyList()); - Transport transport = new NioTransport(settings, version, threadPool, networkService, new MockPageCacheRecycler(settings), + return new NioTransport(settings, version, threadPool, networkService, new MockPageCacheRecycler(settings), namedWriteableRegistry, new NoneCircuitBreakerService(), new NioGroupFactory(settings, logger)) { @Override @@ -69,20 +64,6 @@ public void executeHandshake(DiscoveryNode node, TcpChannel channel, ConnectionP } } }; - MockTransportService mockTransportService = - MockTransportService.createNewService(settings, transport, version, threadPool, clusterSettings, Collections.emptySet()); - mockTransportService.start(); - return mockTransportService; - } - - @Override - protected MockTransportService build(Settings settings, Version version, ClusterSettings clusterSettings, boolean doHandshake) { - settings = Settings.builder().put(settings) - .put(TransportSettings.PORT.getKey(), "0") - .build(); - MockTransportService transportService = nioFromThreadPool(settings, threadPool, version, clusterSettings, doHandshake); - transportService.start(); - return transportService; } public void testConnectException() throws UnknownHostException { @@ -97,26 +78,4 @@ public void testConnectException() throws UnknownHostException { assertThat(cause, instanceOf(IOException.class)); } } - - public void testBindUnavailableAddress() { - // this is on a lower level since it needs access to the TransportService before it's started - int port = serviceA.boundAddress().publishAddress().getPort(); - Settings settings = Settings.builder() - .put(Node.NODE_NAME_SETTING.getKey(), "foobar") - .put(TransportSettings.TRACE_LOG_INCLUDE_SETTING.getKey(), "") - .put(TransportSettings.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING") - .put(TransportSettings.PORT.getKey(), port) - .build(); - ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); 
- BindTransportException bindTransportException = expectThrows(BindTransportException.class, () -> { - MockTransportService transportService = nioFromThreadPool(settings, threadPool, Version.CURRENT, clusterSettings, true); - try { - transportService.start(); - } finally { - transportService.stop(); - transportService.close(); - } - }); - assertEquals("Failed to bind to ["+ port + "]", bindTransportException.getMessage()); - } } diff --git a/qa/build.gradle b/qa/build.gradle index 9266a09b25735..2d3ca8282d4ca 100644 --- a/qa/build.gradle +++ b/qa/build.gradle @@ -11,11 +11,8 @@ subprojects { Project subproj -> } } plugins.withType(TestClustersPlugin).whenPluginAdded { - afterEvaluate { - // We need to delay this so it's not overwritten in RestIntegTestTask - testClusters.all { - distribution = System.getProperty('tests.distribution', 'oss').toUpperCase() - } + testClusters.all { + testDistribution = System.getProperty('tests.distribution', 'oss').toUpperCase() } } } diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilSecurityTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilSecurityTests.java index 15dea1adef7b5..1c34e07529a98 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilSecurityTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilSecurityTests.java @@ -136,6 +136,7 @@ public void testEnvironmentPaths() throws Exception { } public void testDuplicateDataPaths() throws IOException { + assumeFalse("https://github.com/elastic/elasticsearch/issues/44558", Constants.WINDOWS); final Path path = createTempDir(); final Path home = path.resolve("home"); final Path data = path.resolve("data"); diff --git a/qa/logging-config/src/test/java/org/elasticsearch/common/logging/ESJsonLayoutTests.java b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/ESJsonLayoutTests.java new file mode 100644 index 0000000000000..655424ce5b040 --- /dev/null +++ b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/ESJsonLayoutTests.java @@ -0,0 +1,97 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.common.logging; + + +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; +import org.junit.BeforeClass; + + +public class ESJsonLayoutTests extends ESTestCase { + @BeforeClass + public static void initNodeName() { + JsonLogsTestSetup.init(); + } + + public void testEmptyType() { + expectThrows(IllegalArgumentException.class, () -> ESJsonLayout.newBuilder().build()); + } + + public void testLayout() { + ESJsonLayout server = ESJsonLayout.newBuilder() + .setType("server") + .build(); + String conversionPattern = server.getPatternLayout().getConversionPattern(); + + assertThat(conversionPattern, Matchers.equalTo( + "{" + + "\"type\": \"server\", " + + "\"timestamp\": \"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZZ}\", " + + "\"level\": \"%p\", " + + "\"component\": \"%c{1.}\", " + + "\"cluster.name\": \"${sys:es.logs.cluster_name}\", " + + "\"node.name\": \"%node_name\", " + + "\"message\": \"%notEmpty{%enc{%marker}{JSON} }%enc{%.-10000m}{JSON}\"" + + "%notEmpty{, %node_and_cluster_id }" + + "%exceptionAsJson }" + System.lineSeparator())); + } + + public void testLayoutWithAdditionalFields() { + ESJsonLayout server = ESJsonLayout.newBuilder() + .setType("server") + .setESMessageFields("x-opaque-id,someOtherField") + .build(); + String conversionPattern = server.getPatternLayout().getConversionPattern(); + + assertThat(conversionPattern, Matchers.equalTo( + "{" + + "\"type\": \"server\", " + + "\"timestamp\": \"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZZ}\", " + + "\"level\": \"%p\", " + + "\"component\": \"%c{1.}\", " + + "\"cluster.name\": \"${sys:es.logs.cluster_name}\", " + + "\"node.name\": \"%node_name\", " + + "\"message\": \"%notEmpty{%enc{%marker}{JSON} }%enc{%.-10000m}{JSON}\"" + + "%notEmpty{, \"x-opaque-id\": \"%ESMessageField{x-opaque-id}\"}" + + "%notEmpty{, \"someOtherField\": \"%ESMessageField{someOtherField}\"}" + + "%notEmpty{, %node_and_cluster_id }" + + "%exceptionAsJson }" + System.lineSeparator())); + } + + public void testLayoutWithAdditionalFieldOverride() { + ESJsonLayout server = ESJsonLayout.newBuilder() + .setType("server") + .setESMessageFields("message") + .build(); + String conversionPattern = server.getPatternLayout().getConversionPattern(); + + assertThat(conversionPattern, Matchers.equalTo( + "{" + + "\"type\": \"server\", " + + "\"timestamp\": \"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZZ}\", " + + "\"level\": \"%p\", " + + "\"component\": \"%c{1.}\", " + + "\"cluster.name\": \"${sys:es.logs.cluster_name}\", " + + "\"node.name\": \"%node_name\"" + + "%notEmpty{, \"message\": \"%ESMessageField{message}\"}" + + "%notEmpty{, %node_and_cluster_id }" + + "%exceptionAsJson }" + System.lineSeparator())); + } +} diff --git a/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java index 2416eb02bfd4e..a3f4ecde8135a 100644 --- a/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java +++ b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java @@ -27,8 +27,10 @@ import org.elasticsearch.cli.UserException; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.env.Environment; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.hamcrest.FeatureMatcher; import 
org.hamcrest.Matcher; @@ -38,19 +40,28 @@ import java.io.IOException; import java.nio.file.Path; import java.util.List; +import java.util.Map; +import java.util.Objects; import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.not; + /** * This test confirms JSON log structure is properly formatted and can be parsed. * It has to be in a org.elasticsearch.common.logging package to use PrefixLogger */ public class JsonLoggerTests extends ESTestCase { + private static final String LINE_SEPARATOR = System.lineSeparator(); @BeforeClass public static void initNodeName() { - LogConfigurator.setNodeName("sample-name"); + JsonLogsTestSetup.init(); } @Override @@ -66,6 +77,84 @@ public void tearDown() throws Exception { Configurator.shutdown(context); super.tearDown(); } + public void testDeprecatedMessage() throws IOException { + final Logger testLogger = LogManager.getLogger("test"); + testLogger.info(new DeprecatedMessage("deprecated message1", "someId")); + + final Path path = PathUtils.get(System.getProperty("es.logs.base_path"), + System.getProperty("es.logs.cluster_name") + "_deprecated.json"); + try (Stream<Map<String, String>> stream = JsonLogsStream.mapStreamFrom(path)) { + List<Map<String, String>> jsonLogs = stream + .collect(Collectors.toList()); + + assertThat(jsonLogs, contains( + allOf( + hasEntry("type", "deprecation"), + hasEntry("level", "INFO"), + hasEntry("component", "test"), + hasEntry("cluster.name", "elasticsearch"), + hasEntry("node.name", "sample-name"), + hasEntry("message", "deprecated message1"), + hasEntry("x-opaque-id", "someId")) + ) + ); + } + } + + + public void testDeprecatedMessageWithoutXOpaqueId() throws IOException { + final Logger testLogger = LogManager.getLogger("test"); + testLogger.info(new DeprecatedMessage("deprecated message1", "someId")); + testLogger.info(new DeprecatedMessage("deprecated message2", "")); + testLogger.info(new DeprecatedMessage("deprecated message3", null)); + testLogger.info("deprecated message4"); + + final Path path = PathUtils.get(System.getProperty("es.logs.base_path"), + System.getProperty("es.logs.cluster_name") + "_deprecated.json"); + try (Stream<Map<String, String>> stream = JsonLogsStream.mapStreamFrom(path)) { + List<Map<String, String>> jsonLogs = stream + .collect(Collectors.toList()); + + assertThat(jsonLogs, contains( + allOf( + hasEntry("type", "deprecation"), + hasEntry("level", "INFO"), + hasEntry("component", "test"), + hasEntry("cluster.name", "elasticsearch"), + hasEntry("node.name", "sample-name"), + hasEntry("message", "deprecated message1"), + hasEntry("x-opaque-id", "someId")), + allOf( + hasEntry("type", "deprecation"), + hasEntry("level", "INFO"), + hasEntry("component", "test"), + hasEntry("cluster.name", "elasticsearch"), + hasEntry("node.name", "sample-name"), + hasEntry("message", "deprecated message2"), + not(hasKey("x-opaque-id")) + ), + allOf( + hasEntry("type", "deprecation"), + hasEntry("level", "INFO"), + hasEntry("component", "test"), + hasEntry("cluster.name", "elasticsearch"), + hasEntry("node.name", "sample-name"), + hasEntry("message", "deprecated message3"), + not(hasKey("x-opaque-id")) + ), + allOf( + hasEntry("type", "deprecation"), + hasEntry("level", "INFO"), + hasEntry("component", "test"), + hasEntry("cluster.name", "elasticsearch"), + hasEntry("node.name", "sample-name"), + hasEntry("message", "deprecated message4"),
not(hasKey("x-opaque-id")) + ) + ) + ); + } + } public void testJsonLayout() throws IOException { final Logger testLogger = LogManager.getLogger("test"); @@ -79,7 +168,7 @@ public void testJsonLayout() throws IOException { try (Stream<JsonLogLine> stream = JsonLogsStream.from(path)) { List<JsonLogLine> jsonLogs = collectLines(stream); - assertThat(jsonLogs, Matchers.contains( + assertThat(jsonLogs, contains( logLine("file", Level.ERROR, "sample-name", "test", "This is an error message"), logLine("file", Level.WARN, "sample-name", "test", "This is a warning message"), logLine("file", Level.INFO, "sample-name", "test", "This is an info message"), @@ -99,8 +188,9 @@ public void testPrefixLoggerInJson() throws IOException { final Path path = clusterLogsPath(); try (Stream<JsonLogLine> stream = JsonLogsStream.from(path)) { List<JsonLogLine> jsonLogs = collectLines(stream); - assertThat(jsonLogs, Matchers.contains( - logLine("file", Level.INFO, "sample-name", "shardIdLogger", "[indexName][123] This is an info message with a shardId"), + assertThat(jsonLogs, contains( + logLine("file", Level.INFO, "sample-name", "shardIdLogger", + "[indexName][123] This is an info message with a shardId"), logLine("file", Level.INFO, "sample-name", "prefixLogger", "PREFIX This is an info message with a prefix") )); } @@ -124,7 +214,7 @@ public void testJsonInMessage() throws IOException { final Path path = clusterLogsPath(); try (Stream<JsonLogLine> stream = JsonLogsStream.from(path)) { List<JsonLogLine> jsonLogs = collectLines(stream); - assertThat(jsonLogs, Matchers.contains( + assertThat(jsonLogs, contains( logLine("file", Level.INFO, "sample-name", "test", json) )); } @@ -137,8 +227,8 @@ public void testStacktrace() throws IOException { final Path path = clusterLogsPath(); try (Stream<JsonLogLine> stream = JsonLogsStream.from(path)) { List<JsonLogLine> jsonLogs = collectLines(stream); - assertThat(jsonLogs, Matchers.contains( - Matchers.allOf( + assertThat(jsonLogs, contains( + allOf( logLine("file", Level.ERROR, "sample-name", "test", "error message"), stacktraceWith("java.lang.Exception: exception message"), stacktraceWith("Caused by: java.lang.RuntimeException: cause message") @@ -166,8 +256,8 @@ public void testJsonInStacktraceMessageIsSplitted() throws IOException { try (Stream<JsonLogLine> stream = JsonLogsStream.from(path)) { List<JsonLogLine> jsonLogs = collectLines(stream); - assertThat(jsonLogs, Matchers.contains( - Matchers.allOf( + assertThat(jsonLogs, contains( + allOf( //message field will have a single line with json escaped logLine("file", Level.ERROR, "sample-name", "test", "error message " + json), @@ -178,6 +268,87 @@ } } + + public void testDuplicateLogMessages() throws IOException { + final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger("test")); + + + // For the same key and X-Opaque-ID, the deprecation message should only be logged once + try (ThreadContext threadContext = new ThreadContext(Settings.EMPTY)) { + try { + threadContext.putHeader(Task.X_OPAQUE_ID, "ID1"); + DeprecationLogger.setThreadContext(threadContext); + deprecationLogger.deprecatedAndMaybeLog("key", "message1"); + deprecationLogger.deprecatedAndMaybeLog("key", "message2"); + assertWarnings("message1", "message2"); + + final Path path = PathUtils.get(System.getProperty("es.logs.base_path"), + System.getProperty("es.logs.cluster_name") + "_deprecated.json"); + try (Stream<Map<String, String>> stream = JsonLogsStream.mapStreamFrom(path)) { + List<Map<String, String>> jsonLogs = stream + .collect(Collectors.toList()); + + assertThat(jsonLogs, contains( + allOf( + hasEntry("type", "deprecation"),
hasEntry("level", "WARN"), + hasEntry("component", "d.test"), + hasEntry("cluster.name", "elasticsearch"), + hasEntry("node.name", "sample-name"), + hasEntry("message", "message1"), + hasEntry("x-opaque-id", "ID1")) + ) + ); + } + } finally { + DeprecationLogger.removeThreadContext(threadContext); + } + } + + // For the same key but different X-Opaque-ID values, the message should be logged once per key/X-Opaque-ID combination + // continuing with message1-ID1 already in the logs, this adds a new deprecation log line for message1-ID2 + try (ThreadContext threadContext = new ThreadContext(Settings.EMPTY)) { + try { + threadContext.putHeader(Task.X_OPAQUE_ID, "ID2"); + DeprecationLogger.setThreadContext(threadContext); + deprecationLogger.deprecatedAndMaybeLog("key", "message1"); + deprecationLogger.deprecatedAndMaybeLog("key", "message2"); + assertWarnings("message1", "message2"); + + final Path path = PathUtils.get(System.getProperty("es.logs.base_path"), + System.getProperty("es.logs.cluster_name") + "_deprecated.json"); + try (Stream<Map<String, String>> stream = JsonLogsStream.mapStreamFrom(path)) { + List<Map<String, String>> jsonLogs = stream + .collect(Collectors.toList()); + + assertThat(jsonLogs, contains( + allOf( + hasEntry("type", "deprecation"), + hasEntry("level", "WARN"), + hasEntry("component", "d.test"), + hasEntry("cluster.name", "elasticsearch"), + hasEntry("node.name", "sample-name"), + hasEntry("message", "message1"), + hasEntry("x-opaque-id", "ID1") + ), + allOf( + hasEntry("type", "deprecation"), + hasEntry("level", "WARN"), + hasEntry("component", "d.test"), + hasEntry("cluster.name", "elasticsearch"), + hasEntry("node.name", "sample-name"), + hasEntry("message", "message1"), + hasEntry("x-opaque-id", "ID2") + ) + ) + ); + } + } finally { + DeprecationLogger.removeThreadContext(threadContext); + } + } + } + private List<JsonLogLine> collectLines(Stream<JsonLogLine> stream) { return stream .skip(1)//skip the first line from super class @@ -209,11 +380,11 @@ private Matcher<JsonLogLine> logLine(String type, Level level, String nodeName, @Override protected Boolean featureValueOf(JsonLogLine actual) { - return actual.type().equals(type) && - actual.level().equals(level.toString()) && - actual.nodeName().equals(nodeName) && - actual.component().equals(component) && - actual.message().equals(message); + return Objects.equals(actual.type(), type) && + Objects.equals(actual.level(), level.toString()) && + Objects.equals(actual.nodeName(), nodeName) && + Objects.equals(actual.component(), component) && + Objects.equals(actual.message(), message); } }; } diff --git a/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLogsTestSetup.java b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLogsTestSetup.java new file mode 100644 index 0000000000000..7ddb11a569657 --- /dev/null +++ b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLogsTestSetup.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.common.logging; + +public class JsonLogsTestSetup { + private static boolean initialized = false; + + public static void init() { + if (initialized == false) { + LogConfigurator.setNodeName("sample-name"); + initialized = true; + } + } +} diff --git a/qa/logging-config/src/test/resources/org/elasticsearch/common/logging/json_layout/log4j2.properties b/qa/logging-config/src/test/resources/org/elasticsearch/common/logging/json_layout/log4j2.properties index 4bbd0b038ab8a..14400177c18a2 100644 --- a/qa/logging-config/src/test/resources/org/elasticsearch/common/logging/json_layout/log4j2.properties +++ b/qa/logging-config/src/test/resources/org/elasticsearch/common/logging/json_layout/log4j2.properties @@ -9,13 +9,43 @@ appender.file.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.l appender.file.layout.type = ESJsonLayout appender.file.layout.type_name = file +appender.deprecated.type = File +appender.deprecated.name = deprecated +appender.deprecated.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecated.json +appender.deprecated.layout.type = ESJsonLayout +appender.deprecated.layout.type_name = deprecation +appender.deprecated.layout.esmessagefields = x-opaque-id + +appender.deprecatedconsole.type = Console +appender.deprecatedconsole.name = deprecatedconsole +appender.deprecatedconsole.layout.type = ESJsonLayout +appender.deprecatedconsole.layout.type_name = deprecation +appender.deprecatedconsole.layout.esmessagefields = x-opaque-id + +appender.index_search_slowlog_rolling.type = File +appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling +appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs\ + .cluster_name}_index_search_slowlog.json +appender.index_search_slowlog_rolling.layout.type = ESJsonLayout +appender.index_search_slowlog_rolling.layout.type_name = index_search_slowlog +appender.index_search_slowlog_rolling.layout.esmessagefields=message,took,took_millis,total_hits,types,stats,search_type,total_shards,source,id rootLogger.level = info rootLogger.appenderRef.console.ref = console rootLogger.appenderRef.file.ref = file +logger.deprecation.name = deprecation.test +logger.deprecation.level = warn +logger.deprecation.appenderRef.console.ref = console +logger.deprecation.appenderRef.file.ref = file +logger.deprecation.appenderRef.deprecation_rolling.ref = deprecated +logger.deprecation.appenderRef.deprecatedconsole.ref = deprecatedconsole +logger.deprecation.additivity = false + logger.test.name = test logger.test.level = trace logger.test.appenderRef.console.ref = console logger.test.appenderRef.file.ref = file +logger.test.appenderRef.deprecated.ref = deprecated +logger.test.appenderRef.deprecatedconsole.ref = deprecatedconsole logger.test.additivity = false diff --git a/qa/unconfigured-node-name/build.gradle b/qa/unconfigured-node-name/build.gradle index 56805f37beae3..57213f689a57f 100644 --- a/qa/unconfigured-node-name/build.gradle +++ b/qa/unconfigured-node-name/build.gradle @@ -1,3 +1,5 @@ +import 
org.elasticsearch.gradle.OS + /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -28,4 +30,6 @@ testClusters.integTest { integTest.runner { nonInputProperties.systemProperty 'tests.logfile', "${ -> testClusters.integTest.singleNode().getServerLog() }" + // https://github.com/elastic/elasticsearch/issues/44656 + onlyIf { OS.WINDOWS.equals(OS.current()) == false } } diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java index 4078dfc4f97db..49cd2022a9673 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java @@ -31,7 +31,6 @@ import org.elasticsearch.packaging.util.Shell; import org.elasticsearch.packaging.util.Shell.Result; -import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; @@ -169,8 +168,8 @@ public void assertRunsWithJavaHome() throws Exception { Archives.stopElasticsearch(installation); String systemJavaHome = sh.getEnv().get("JAVA_HOME"); - Path log = installation.logs.resolve("elasticsearch.log"); - assertThat(new String(Files.readAllBytes(log), StandardCharsets.UTF_8), containsString(systemJavaHome)); + assertThat(FileUtils.slurpAllLogs(installation.logs, "elasticsearch.log", "*.log.gz"), + containsString(systemJavaHome)); } public void test51JavaHomeOverride() throws Exception { diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackageTestCase.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackageTestCase.java index 245234baf2012..fd8390610fa69 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackageTestCase.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackageTestCase.java @@ -124,8 +124,8 @@ public void assertRunsWithJavaHome() throws Exception { Files.write(installation.envFile, originalEnvFile); } - Path log = installation.logs.resolve("elasticsearch.log"); - assertThat(new String(Files.readAllBytes(log), StandardCharsets.UTF_8), containsString(systemJavaHome)); + assertThat(FileUtils.slurpAllLogs(installation.logs, "elasticsearch.log", "*.log.gz"), + containsString(systemJavaHome)); } public void test32JavaHomeOverride() throws Exception { diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/WindowsServiceTestCase.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/WindowsServiceTestCase.java index 57eaf13fe9e94..b0827513c9233 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/WindowsServiceTestCase.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/WindowsServiceTestCase.java @@ -92,7 +92,7 @@ private void assertExit(Result result, String script, int exitCode) { Result logs = sh.run("$files = Get-ChildItem \"" + installation.logs + "\\elasticsearch.log\"; " + "Write-Output $files; " + "foreach ($file in $files) {" + - "Write-Output \"$file\"; " + + "Write-Output \"$file\"; " + "Get-Content \"$file\" " + "}"); logger.error(logs.stdout); diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/FileUtils.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/FileUtils.java index efbf0bd74a354..ca6c3e48d411e 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/FileUtils.java +++ 
b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/FileUtils.java @@ -24,7 +24,10 @@ import org.hamcrest.Matcher; import java.io.BufferedWriter; +import java.io.ByteArrayOutputStream; import java.io.IOException; +import java.nio.channels.Channels; +import java.nio.channels.FileChannel; import java.nio.charset.StandardCharsets; import java.nio.file.DirectoryStream; import java.nio.file.Files; @@ -38,6 +41,9 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.StringJoiner; +import java.util.zip.GZIPInputStream; +import java.util.zip.ZipException; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.emptyIterable; @@ -124,6 +130,56 @@ public static String slurp(Path file) { } } + /** + * Returns the content of a {@link java.nio.file.Path} file. The file can be in plain text or GZIP format. + * @param file The {@link java.nio.file.Path} to the file. + * @return The content of {@code file}. + */ + public static String slurpTxtorGz(Path file) { + ByteArrayOutputStream fileBuffer = new ByteArrayOutputStream(); + try (GZIPInputStream in = new GZIPInputStream(Channels.newInputStream(FileChannel.open(file)))) { + byte[] buffer = new byte[1024]; + int len; + + while ((len = in.read(buffer)) != -1) { + fileBuffer.write(buffer, 0, len); + } + + return new String(fileBuffer.toByteArray(), StandardCharsets.UTF_8); + } catch (ZipException e) { + if (e.toString().contains("Not in GZIP format")) { + return slurp(file); + } + throw new RuntimeException(e); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + /** + * Returns combined content of a text log file and rotated log files matching a pattern. Order of rotated log files is + * not guaranteed. + * @param logPath Base directory where log files reside. + * @param activeLogFile The currently active log file. This file needs to be plain text under {@code logPath}. + * @param rotatedLogFilesGlob A glob pattern to match rotated log files under {@code logPath}. + * See {@link java.nio.file.FileSystem#getPathMatcher(String)} for glob examples. + * @return The contents of {@code activeLogFile} followed by the contents of the files matching {@code rotatedLogFilesGlob}, + * separated by newlines.
+ */ + public static String slurpAllLogs(Path logPath, String activeLogFile, String rotatedLogFilesGlob) { + StringJoiner logFileJoiner = new StringJoiner("\n"); + try { + logFileJoiner.add(new String(Files.readAllBytes(logPath.resolve(activeLogFile)), StandardCharsets.UTF_8)); + + for (Path rotatedLogFile : FileUtils.lsGlob(logPath, rotatedLogFilesGlob)) { + logFileJoiner.add(FileUtils.slurpTxtorGz(rotatedLogFile)); + } + return logFileJoiner.toString(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + /** * Gets the owner of a file in a way that should be supported by all filesystems that have a concept of file owner */ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/clear_scroll.json b/rest-api-spec/src/main/resources/rest-api-spec/api/clear_scroll.json index eb01710fd8415..0274487544652 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/clear_scroll.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/clear_scroll.json @@ -1,6 +1,6 @@ { "clear_scroll": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/search-request-scroll.html", + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/search-request-body.html#request-body-search-scroll", "stability": "stable", "methods": ["DELETE"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.flush_synced.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.flush_synced.json index 1b9ab1bb5eff5..1383224b92f81 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.flush_synced.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.flush_synced.json @@ -1,6 +1,6 @@ { "indices.flush_synced": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-synced-flush.html", + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-flush.html#synced-flush-api", "stability": "stable", "methods": ["POST", "GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/scroll.json b/rest-api-spec/src/main/resources/rest-api-spec/api/scroll.json index 88851b2db1e34..42329aae46c40 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/scroll.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/scroll.json @@ -1,6 +1,6 @@ { "scroll": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/search-request-scroll.html", + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/search-request-body.html#request-body-search-scroll", "stability": "stable", "methods": ["GET", "POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc index c83edb69b3e62..84afbf0217ab9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc @@ -50,7 +50,7 @@ section in order to setup the same environment for each test section. A `teardown` section contains a list of commands to run after each test section in order to setup the same environment for each test section. This -may be needed for modifications made by the testthat are not cleared by the +may be needed for modifications made by the test that are not cleared by the deletion of indices and templates.
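(For illustration only, not part of this change: a `teardown` section uses the same YAML layout as `setup`. The pipeline id below is hypothetical, and `ignore: 404` keeps the cleanup from failing when the resource was never created.)

[source,yaml]
----
teardown:
  - do:
      ingest.delete_pipeline:
        id: "my_pipeline"
        ignore: 404
----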
A test section represents an independent test, containing multiple `do` diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/16_noop.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/16_noop.yml new file mode 100644 index 0000000000000..9462e80e758fe --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/16_noop.yml @@ -0,0 +1,43 @@ +--- +"Noop": + - skip: + version: " - 7.9.99" + reason: "Noop does not return seq_no and primary_term until 8.0" + - do: + index: + index: test_1 + type: test + id: 1 + body: { foo: bar } + + - match: { _seq_no: 0 } + - match: { _version: 1 } + - match: { _primary_term: 1 } + - match: { result: created } + + - do: + update: + index: test_1 + type: test + id: 1 + body: + doc: { foo: bar } + + - match: { _seq_no: 0 } + - match: { _version: 1 } + - match: { _primary_term: 1 } + - match: { result: noop } + + - do: + update: + index: test_1 + type: test + id: 1 + body: + doc: { foo: bar } + detect_noop: false + + - match: { _seq_no: 1 } + - match: { _primary_term: 1 } + - match: { _version: 2 } + - match: { result: updated } diff --git a/server/licenses/lucene-analyzers-common-8.1.0.jar.sha1 b/server/licenses/lucene-analyzers-common-8.1.0.jar.sha1 deleted file mode 100644 index 6eb7722fec744..0000000000000 --- a/server/licenses/lucene-analyzers-common-8.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -262f20cb2786cdf7015a4ba1a64ce90ff2d746f5 \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-8.2.0-snapshot-6413aae226.jar.sha1 b/server/licenses/lucene-analyzers-common-8.2.0-snapshot-6413aae226.jar.sha1 new file mode 100644 index 0000000000000..962a7447eeee6 --- /dev/null +++ b/server/licenses/lucene-analyzers-common-8.2.0-snapshot-6413aae226.jar.sha1 @@ -0,0 +1 @@ +dc386e4b342d56474f4220d3906fb73432be12ee \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.1.0.jar.sha1 b/server/licenses/lucene-backward-codecs-8.1.0.jar.sha1 deleted file mode 100644 index c232e0fbdfdb9..0000000000000 --- a/server/licenses/lucene-backward-codecs-8.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c5610306f8eff182b399b9aed7a60b82668a8395 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.2.0-snapshot-6413aae226.jar.sha1 b/server/licenses/lucene-backward-codecs-8.2.0-snapshot-6413aae226.jar.sha1 new file mode 100644 index 0000000000000..a8cd7645a9b60 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-8.2.0-snapshot-6413aae226.jar.sha1 @@ -0,0 +1 @@ +93b881f369fc1f71eaee7b3604885b9acf38e807 \ No newline at end of file diff --git a/server/licenses/lucene-core-8.1.0.jar.sha1 b/server/licenses/lucene-core-8.1.0.jar.sha1 deleted file mode 100644 index 4a6aa7b098686..0000000000000 --- a/server/licenses/lucene-core-8.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -46d614acdeb42f4661e91347100217bc72aae11e \ No newline at end of file diff --git a/server/licenses/lucene-core-8.2.0-snapshot-6413aae226.jar.sha1 b/server/licenses/lucene-core-8.2.0-snapshot-6413aae226.jar.sha1 new file mode 100644 index 0000000000000..8e703b4ec6b84 --- /dev/null +++ b/server/licenses/lucene-core-8.2.0-snapshot-6413aae226.jar.sha1 @@ -0,0 +1 @@ +79f8f65bf5a536b95a5e1074ba431544a0a73fcb \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.1.0.jar.sha1 b/server/licenses/lucene-grouping-8.1.0.jar.sha1 deleted file mode 100644 index f3c49cb193aba..0000000000000 --- a/server/licenses/lucene-grouping-8.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-443f63d9038eea0601b493fa37fc599d74b035eb \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.2.0-snapshot-6413aae226.jar.sha1 b/server/licenses/lucene-grouping-8.2.0-snapshot-6413aae226.jar.sha1 new file mode 100644 index 0000000000000..e5a8057ec84f9 --- /dev/null +++ b/server/licenses/lucene-grouping-8.2.0-snapshot-6413aae226.jar.sha1 @@ -0,0 +1 @@ +d7e6a3723bf52c101bb09c39dc532fff7db74d89 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.1.0.jar.sha1 b/server/licenses/lucene-highlighter-8.1.0.jar.sha1 deleted file mode 100644 index 6b174859e1834..0000000000000 --- a/server/licenses/lucene-highlighter-8.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e3e52591f8d44a4e1006ced4dd4a67f7a572990a \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.2.0-snapshot-6413aae226.jar.sha1 b/server/licenses/lucene-highlighter-8.2.0-snapshot-6413aae226.jar.sha1 new file mode 100644 index 0000000000000..450f408212bec --- /dev/null +++ b/server/licenses/lucene-highlighter-8.2.0-snapshot-6413aae226.jar.sha1 @@ -0,0 +1 @@ +5618357f383674274fbc300213242acb298fedfb \ No newline at end of file diff --git a/server/licenses/lucene-join-8.1.0.jar.sha1 b/server/licenses/lucene-join-8.1.0.jar.sha1 deleted file mode 100644 index 75232f1fc0a72..0000000000000 --- a/server/licenses/lucene-join-8.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2e885b1e3e55f94ccc2744f85738563a577a4e21 \ No newline at end of file diff --git a/server/licenses/lucene-join-8.2.0-snapshot-6413aae226.jar.sha1 b/server/licenses/lucene-join-8.2.0-snapshot-6413aae226.jar.sha1 new file mode 100644 index 0000000000000..cc2161879b7bc --- /dev/null +++ b/server/licenses/lucene-join-8.2.0-snapshot-6413aae226.jar.sha1 @@ -0,0 +1 @@ +f09a43945578dba87eecda0b316cb980c64ced3c \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.1.0.jar.sha1 b/server/licenses/lucene-memory-8.1.0.jar.sha1 deleted file mode 100644 index 4b2c65af32da5..0000000000000 --- a/server/licenses/lucene-memory-8.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e58d0092da1c4744627d57d022f4e07d8b80d11b \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.2.0-snapshot-6413aae226.jar.sha1 b/server/licenses/lucene-memory-8.2.0-snapshot-6413aae226.jar.sha1 new file mode 100644 index 0000000000000..0aa293c816f46 --- /dev/null +++ b/server/licenses/lucene-memory-8.2.0-snapshot-6413aae226.jar.sha1 @@ -0,0 +1 @@ +e0c8338473d317024ab6d60a3b681eb0489aae80 \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.1.0.jar.sha1 b/server/licenses/lucene-misc-8.1.0.jar.sha1 deleted file mode 100644 index 37afcfadb7e12..0000000000000 --- a/server/licenses/lucene-misc-8.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -07833aee2c5feb6fa1a16a21d27c8f15c01d0b4c \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.2.0-snapshot-6413aae226.jar.sha1 b/server/licenses/lucene-misc-8.2.0-snapshot-6413aae226.jar.sha1 new file mode 100644 index 0000000000000..0e0a91bffe119 --- /dev/null +++ b/server/licenses/lucene-misc-8.2.0-snapshot-6413aae226.jar.sha1 @@ -0,0 +1 @@ +8ab08f7a01e7109d64eb7b17d6099c51abe77b2f \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.1.0.jar.sha1 b/server/licenses/lucene-queries-8.1.0.jar.sha1 deleted file mode 100644 index 7f09849b67693..0000000000000 --- a/server/licenses/lucene-queries-8.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -63096d40298b8b8245a602d344b57bfa14b929fd \ No newline at end of file diff --git 
a/server/licenses/lucene-queries-8.2.0-snapshot-6413aae226.jar.sha1 b/server/licenses/lucene-queries-8.2.0-snapshot-6413aae226.jar.sha1 new file mode 100644 index 0000000000000..95ddbb354cc99 --- /dev/null +++ b/server/licenses/lucene-queries-8.2.0-snapshot-6413aae226.jar.sha1 @@ -0,0 +1 @@ +a8ad706739d679b1f7d076a2f70e3cfa794292cb \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.1.0.jar.sha1 b/server/licenses/lucene-queryparser-8.1.0.jar.sha1 deleted file mode 100644 index ada3ec974e031..0000000000000 --- a/server/licenses/lucene-queryparser-8.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9bb4fb3c7035a877e4a87ed86870894509d26d65 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.2.0-snapshot-6413aae226.jar.sha1 b/server/licenses/lucene-queryparser-8.2.0-snapshot-6413aae226.jar.sha1 new file mode 100644 index 0000000000000..6b381afbf6264 --- /dev/null +++ b/server/licenses/lucene-queryparser-8.2.0-snapshot-6413aae226.jar.sha1 @@ -0,0 +1 @@ +c688121186a33d0ac5283c24fb0b1dd18de1d1f5 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.1.0.jar.sha1 b/server/licenses/lucene-sandbox-8.1.0.jar.sha1 deleted file mode 100644 index 422195c73c69d..0000000000000 --- a/server/licenses/lucene-sandbox-8.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1033737c97703516134ba4c99d41724729854df4 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.2.0-snapshot-6413aae226.jar.sha1 b/server/licenses/lucene-sandbox-8.2.0-snapshot-6413aae226.jar.sha1 new file mode 100644 index 0000000000000..c1fd694c80e3d --- /dev/null +++ b/server/licenses/lucene-sandbox-8.2.0-snapshot-6413aae226.jar.sha1 @@ -0,0 +1 @@ +7f7a0a7be83093e77775aaec3be63e59a537166e \ No newline at end of file diff --git a/server/licenses/lucene-spatial-8.1.0.jar.sha1 b/server/licenses/lucene-spatial-8.1.0.jar.sha1 deleted file mode 100644 index e0d8f362a1ecf..0000000000000 --- a/server/licenses/lucene-spatial-8.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -968d2fb35b0c2e68ac07c1ec187ab38a74b6602a \ No newline at end of file diff --git a/server/licenses/lucene-spatial-8.2.0-snapshot-6413aae226.jar.sha1 b/server/licenses/lucene-spatial-8.2.0-snapshot-6413aae226.jar.sha1 new file mode 100644 index 0000000000000..c85ac98032cb6 --- /dev/null +++ b/server/licenses/lucene-spatial-8.2.0-snapshot-6413aae226.jar.sha1 @@ -0,0 +1 @@ +3cfe4a86fad519f1a78dfbdb8b1133550f7cb5d5 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.1.0.jar.sha1 b/server/licenses/lucene-spatial-extras-8.1.0.jar.sha1 deleted file mode 100644 index 0a45cfe117a3a..0000000000000 --- a/server/licenses/lucene-spatial-extras-8.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -551b7fa327645d3fd59ae1321320153b2f858766 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.2.0-snapshot-6413aae226.jar.sha1 b/server/licenses/lucene-spatial-extras-8.2.0-snapshot-6413aae226.jar.sha1 new file mode 100644 index 0000000000000..fd1f769ec7856 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-8.2.0-snapshot-6413aae226.jar.sha1 @@ -0,0 +1 @@ +6710bc40dc4108fe12f9f56b3e23660c40f65df6 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.1.0.jar.sha1 b/server/licenses/lucene-spatial3d-8.1.0.jar.sha1 deleted file mode 100644 index 9cdde5a308e22..0000000000000 --- a/server/licenses/lucene-spatial3d-8.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -45e63df708be458e95d9da3e6054189c50c30dff \ No newline at end of file diff --git 
a/server/licenses/lucene-spatial3d-8.2.0-snapshot-6413aae226.jar.sha1 b/server/licenses/lucene-spatial3d-8.2.0-snapshot-6413aae226.jar.sha1 new file mode 100644 index 0000000000000..14f72bd268d80 --- /dev/null +++ b/server/licenses/lucene-spatial3d-8.2.0-snapshot-6413aae226.jar.sha1 @@ -0,0 +1 @@ +8769653d5fadddf0f376e152700b9578bddd74e7 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-8.1.0.jar.sha1 b/server/licenses/lucene-suggest-8.1.0.jar.sha1 deleted file mode 100644 index c4ac6e68080ab..0000000000000 --- a/server/licenses/lucene-suggest-8.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d5cd0e619b473e132f03e3577d1b422f050f99c0 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-8.2.0-snapshot-6413aae226.jar.sha1 b/server/licenses/lucene-suggest-8.2.0-snapshot-6413aae226.jar.sha1 new file mode 100644 index 0000000000000..e100acc8b2ca7 --- /dev/null +++ b/server/licenses/lucene-suggest-8.2.0-snapshot-6413aae226.jar.sha1 @@ -0,0 +1 @@ +96c17ce3b4c9e8c9b6a525a8204e7dd2ea18496c \ No newline at end of file diff --git a/server/src/main/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighter.java b/server/src/main/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighter.java index 4dd36896bf346..2d35de522b5f0 100644 --- a/server/src/main/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighter.java +++ b/server/src/main/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighter.java @@ -137,10 +137,10 @@ protected FieldHighlighter getFieldHighlighter(String field, Query query, Set highlightFlags = getFlags(field); PhraseHelper phraseHelper = getPhraseHelper(field, query, highlightFlags); CharacterRunAutomaton[] automata = getAutomata(field, query, highlightFlags); - OffsetSource offsetSource = getOptimizedOffsetSource(field, terms, phraseHelper, automata); + UHComponents components = new UHComponents(field, fieldMatcher, query, terms, phraseHelper, automata, false , highlightFlags); + OffsetSource offsetSource = getOptimizedOffsetSource(components); BreakIterator breakIterator = new SplittingBreakIterator(getBreakIterator(field), UnifiedHighlighter.MULTIVAL_SEP_CHAR); - UHComponents components = new UHComponents(field, fieldMatcher, query, terms, phraseHelper, automata, highlightFlags); FieldOffsetStrategy strategy = getOffsetStrategy(offsetSource, components); return new CustomFieldHighlighter(field, strategy, breakIteratorLocale, breakIterator, getScorer(field), maxPassages, (noMatchSize > 0 ? 
1 : 0), getFormatter(field), noMatchSize, fieldValue); diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 152446cb700be..4fab1dcfd052b 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -54,8 +54,8 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_7_2_0 = new Version(7020099, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final Version V_7_2_1 = new Version(7020199, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final Version V_7_3_0 = new Version(7030099, org.apache.lucene.util.Version.LUCENE_8_1_0); - public static final Version V_7_4_0 = new Version(7040099, org.apache.lucene.util.Version.LUCENE_8_1_0); - public static final Version V_8_0_0 = new Version(8000099, org.apache.lucene.util.Version.LUCENE_8_1_0); + public static final Version V_7_4_0 = new Version(7040099, org.apache.lucene.util.Version.LUCENE_8_2_0); + public static final Version V_8_0_0 = new Version(8000099, org.apache.lucene.util.Version.LUCENE_8_2_0); public static final Version CURRENT = V_8_0_0; private static final ImmutableOpenIntMap idToVersion; diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 9ccd4c9fdfdb0..9c2ddf2b1d0d5 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -33,7 +33,6 @@ import org.elasticsearch.action.admin.cluster.node.hotthreads.TransportNodesHotThreadsAction; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoAction; import org.elasticsearch.action.admin.cluster.node.info.TransportNodesInfoAction; -import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction; import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsAction; import org.elasticsearch.action.admin.cluster.node.reload.TransportNodesReloadSecureSettingsAction; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction; @@ -71,6 +70,7 @@ import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotAction; import org.elasticsearch.action.admin.cluster.snapshots.restore.TransportRestoreSnapshotAction; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusAction; +import org.elasticsearch.action.admin.cluster.snapshots.status.TransportNodesSnapshotsStatus; import org.elasticsearch.action.admin.cluster.snapshots.status.TransportSnapshotsStatusAction; import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; import org.elasticsearch.action.admin.cluster.state.TransportClusterStateAction; @@ -95,6 +95,7 @@ import org.elasticsearch.action.admin.indices.cache.clear.TransportClearIndicesCacheAction; import org.elasticsearch.action.admin.indices.close.CloseIndexAction; import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction; +import org.elasticsearch.action.admin.indices.close.TransportVerifyShardBeforeCloseAction; import org.elasticsearch.action.admin.indices.create.CreateIndexAction; import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; @@ -102,6 +103,7 @@ import org.elasticsearch.action.admin.indices.flush.FlushAction; import 
org.elasticsearch.action.admin.indices.flush.SyncedFlushAction; import org.elasticsearch.action.admin.indices.flush.TransportFlushAction; +import org.elasticsearch.action.admin.indices.flush.TransportShardFlushAction; import org.elasticsearch.action.admin.indices.flush.TransportSyncedFlushAction; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeAction; import org.elasticsearch.action.admin.indices.forcemerge.TransportForceMergeAction; @@ -121,6 +123,7 @@ import org.elasticsearch.action.admin.indices.recovery.TransportRecoveryAction; import org.elasticsearch.action.admin.indices.refresh.RefreshAction; import org.elasticsearch.action.admin.indices.refresh.TransportRefreshAction; +import org.elasticsearch.action.admin.indices.refresh.TransportShardRefreshAction; import org.elasticsearch.action.admin.indices.rollover.RolloverAction; import org.elasticsearch.action.admin.indices.rollover.TransportRolloverAction; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsAction; @@ -206,11 +209,14 @@ import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.gateway.TransportNodesListGatewayMetaState; +import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards; import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction; import org.elasticsearch.index.seqno.RetentionLeaseActions; import org.elasticsearch.index.seqno.RetentionLeaseBackgroundSyncAction; import org.elasticsearch.index.seqno.RetentionLeaseSyncAction; import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData; import org.elasticsearch.persistent.CompletionPersistentTaskAction; import org.elasticsearch.persistent.RemovePersistentTaskAction; import org.elasticsearch.persistent.StartPersistentTaskAction; @@ -532,6 +538,13 @@ public void reg actions.register(GlobalCheckpointSyncAction.TYPE, GlobalCheckpointSyncAction.class); actions.register(RetentionLeaseBackgroundSyncAction.TYPE, RetentionLeaseBackgroundSyncAction.class); actions.register(RetentionLeaseSyncAction.TYPE, RetentionLeaseSyncAction.class); + actions.register(TransportNodesSnapshotsStatus.TYPE, TransportNodesSnapshotsStatus.class); + actions.register(TransportNodesListGatewayMetaState.TYPE, TransportNodesListGatewayMetaState.class); + actions.register(TransportVerifyShardBeforeCloseAction.TYPE, TransportVerifyShardBeforeCloseAction.class); + actions.register(TransportNodesListGatewayStartedShards.TYPE, TransportNodesListGatewayStartedShards.class); + actions.register(TransportNodesListShardStoreMetaData.TYPE, TransportNodesListShardStoreMetaData.class); + actions.register(TransportShardFlushAction.TYPE, TransportShardFlushAction.class); + actions.register(TransportShardRefreshAction.TYPE, TransportShardRefreshAction.class); return unmodifiableMap(actions.getRegistry()); } @@ -690,7 +703,6 @@ protected void configure() { bind(new TypeLiteral>() {}).toInstance(mappingRequestValidators); bind(new TypeLiteral>() {}).toInstance(indicesAliasesRequestRequestValidators); bind(AutoCreateIndex.class).toInstance(autoCreateIndex); - bind(TransportLivenessAction.class).asEagerSingleton(); // register ActionType -> transportAction Map used by NodeClient @SuppressWarnings("rawtypes") diff --git a/server/src/main/java/org/elasticsearch/action/ActionRequest.java b/server/src/main/java/org/elasticsearch/action/ActionRequest.java 
index f5f10c7bcfa9d..3b2055ba98f5f 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionRequest.java +++ b/server/src/main/java/org/elasticsearch/action/ActionRequest.java @@ -47,11 +47,6 @@ public boolean getShouldStoreResult() { return false; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/ActionResponse.java b/server/src/main/java/org/elasticsearch/action/ActionResponse.java index b2729a5372f56..f06a88f84fbc3 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionResponse.java +++ b/server/src/main/java/org/elasticsearch/action/ActionResponse.java @@ -35,9 +35,4 @@ public ActionResponse() { public ActionResponse(StreamInput in) throws IOException { super(in); } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - } } diff --git a/server/src/main/java/org/elasticsearch/action/ActionType.java b/server/src/main/java/org/elasticsearch/action/ActionType.java index 02f8f3c6fc22f..d931ca2b27579 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionType.java +++ b/server/src/main/java/org/elasticsearch/action/ActionType.java @@ -31,15 +31,6 @@ public class ActionType { private final String name; private final Writeable.Reader responseReader; - /** - * @param name The name of the action, must be unique across actions. - * @deprecated Pass a {@link Writeable.Reader} with {@link } - */ - @Deprecated - protected ActionType(String name) { - this(name, null); - } - /** * @param name The name of the action, must be unique across actions. * @param responseReader A reader for the response type diff --git a/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java b/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java index 61328a78df69c..5594a21a2cc45 100644 --- a/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java @@ -226,9 +226,7 @@ static DocWriteRequest readDocumentRequest(StreamInput in) throws IOException } else if (type == 1) { docWriteRequest = new DeleteRequest(in); } else if (type == 2) { - UpdateRequest updateRequest = new UpdateRequest(); - updateRequest.readFrom(in); - docWriteRequest = updateRequest; + docWriteRequest = new UpdateRequest(in); } else { throw new IllegalStateException("invalid request type [" + type+ " ]"); } diff --git a/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java index 80225c3a60ce4..55b80e58514bc 100644 --- a/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java +++ b/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java @@ -111,14 +111,14 @@ public void writeTo(StreamOutput out) throws IOException { } } - private ShardId shardId; - private String id; - private String type; - private long version; - private long seqNo; - private long primaryTerm; + private final ShardId shardId; + private final String id; + private final String type; + private final long version; + private final long seqNo; + private final long primaryTerm; private boolean forcedRefresh; - protected Result result; + protected final Result result; public DocWriteResponse(ShardId shardId, String type, String id, long seqNo, long primaryTerm, long version, Result result) { this.shardId = shardId; @@ 
-131,7 +131,16 @@ public DocWriteResponse(ShardId shardId, String type, String id, long seqNo, lon } // needed for deserialization - protected DocWriteResponse() { + protected DocWriteResponse(StreamInput in) throws IOException { + super(in); + shardId = new ShardId(in); + type = in.readString(); + id = in.readString(); + version = in.readZLong(); + seqNo = in.readZLong(); + primaryTerm = in.readVLong(); + forcedRefresh = in.readBoolean(); + result = Result.readFrom(in); } /** @@ -256,19 +265,6 @@ public String getLocation(@Nullable String routing) { return location.toString(); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - shardId = new ShardId(in); - type = in.readString(); - id = in.readString(); - version = in.readZLong(); - seqNo = in.readZLong(); - primaryTerm = in.readVLong(); - forcedRefresh = in.readBoolean(); - result = Result.readFrom(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -372,8 +368,8 @@ public abstract static class Builder { protected Result result = null; protected boolean forcedRefresh; protected ShardInfo shardInfo = null; - protected Long seqNo = UNASSIGNED_SEQ_NO; - protected Long primaryTerm = UNASSIGNED_PRIMARY_TERM; + protected long seqNo = UNASSIGNED_SEQ_NO; + protected long primaryTerm = UNASSIGNED_PRIMARY_TERM; public ShardId getShardId() { return shardId; @@ -415,11 +411,11 @@ public void setShardInfo(ShardInfo shardInfo) { this.shardInfo = shardInfo; } - public void setSeqNo(Long seqNo) { + public void setSeqNo(long seqNo) { this.seqNo = seqNo; } - public void setPrimaryTerm(Long primaryTerm) { + public void setPrimaryTerm(long primaryTerm) { this.primaryTerm = primaryTerm; } diff --git a/server/src/main/java/org/elasticsearch/action/ShardOperationFailedException.java b/server/src/main/java/org/elasticsearch/action/ShardOperationFailedException.java index 34a8ccd7ad148..1f95cd0186eac 100644 --- a/server/src/main/java/org/elasticsearch/action/ShardOperationFailedException.java +++ b/server/src/main/java/org/elasticsearch/action/ShardOperationFailedException.java @@ -20,7 +20,7 @@ package org.elasticsearch.action; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.rest.RestStatus; @@ -30,7 +30,7 @@ * An exception indicating that a failure occurred performing an operation on the shard. 
* */ -public abstract class ShardOperationFailedException implements Streamable, ToXContentObject { +public abstract class ShardOperationFailedException implements Writeable, ToXContentObject { protected String index; protected int shardId = -1; diff --git a/server/src/main/java/org/elasticsearch/action/StepListener.java b/server/src/main/java/org/elasticsearch/action/StepListener.java index 160ba23da246f..43338d94de86d 100644 --- a/server/src/main/java/org/elasticsearch/action/StepListener.java +++ b/server/src/main/java/org/elasticsearch/action/StepListener.java @@ -50,7 +50,7 @@ * } */ -public final class StepListener<Response> implements ActionListener<Response> { +public final class StepListener<Response> extends NotifyOnceListener<Response> { private final ListenableFuture<Response> delegate; public StepListener() { @@ -58,12 +58,12 @@ public StepListener() { } @Override - public void onResponse(Response response) { + protected void innerOnResponse(Response response) { delegate.onResponse(response); } @Override - public void onFailure(Exception e) { + protected void innerOnFailure(Exception e) { delegate.onFailure(e); } diff --git a/server/src/main/java/org/elasticsearch/action/StreamableResponseActionType.java b/server/src/main/java/org/elasticsearch/action/StreamableResponseActionType.java deleted file mode 100644 index b8206bb03f89d..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/StreamableResponseActionType.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action; - -import org.elasticsearch.common.io.stream.Writeable; - -/** - * An action for which the response type implements {@link org.elasticsearch.common.io.stream.Streamable}. - * @deprecated Use {@link ActionType} directly and provide a {@link Writeable.Reader} - */ -@Deprecated -public abstract class StreamableResponseActionType<Response extends ActionResponse> extends ActionType<Response> { - - protected StreamableResponseActionType(String name) { - super(name); - } - - /** - * Creates a new response instance.
- * @deprecated Implement {@link #getResponseReader()} instead and make this method throw an - * {@link UnsupportedOperationException} - */ - @Deprecated - public abstract Response newResponse(); - - @Override - public final Writeable.Reader getResponseReader() { - return in -> { - Response response = newResponse(); - response.readFrom(in); - return response; - }; - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainAction.java index acaaed9eaa985..47d8a94dd2fd0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainAction.java @@ -19,22 +19,17 @@ package org.elasticsearch.action.admin.cluster.allocation; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * ActionType for explaining shard allocation for a shard in the cluster */ -public class ClusterAllocationExplainAction extends StreamableResponseActionType { +public class ClusterAllocationExplainAction extends ActionType { public static final ClusterAllocationExplainAction INSTANCE = new ClusterAllocationExplainAction(); public static final String NAME = "cluster:monitor/allocation/explain"; private ClusterAllocationExplainAction() { - super(NAME); - } - - @Override - public ClusterAllocationExplainResponse newResponse() { - return new ClusterAllocationExplainResponse(); + super(NAME, ClusterAllocationExplainResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java index b6959afba5d89..0b0bb8c57a9b7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java @@ -243,9 +243,4 @@ public String toString() { public static ClusterAllocationExplainRequest parse(XContentParser parser) throws IOException { return PARSER.parse(parser, new ClusterAllocationExplainRequest(), null); } - - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainResponse.java index 344c448a2c07b..0dadd79466560 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainResponse.java @@ -32,7 +32,9 @@ public class ClusterAllocationExplainResponse extends ActionResponse { private ClusterAllocationExplanation cae; - public ClusterAllocationExplainResponse() { + public ClusterAllocationExplainResponse(StreamInput in) throws IOException { + super(in); + this.cae = new ClusterAllocationExplanation(in); } public ClusterAllocationExplainResponse(ClusterAllocationExplanation cae) { @@ -46,12 +48,6 @@ public ClusterAllocationExplanation 
getExplanation() { return this.cae; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - this.cae = new ClusterAllocationExplanation(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { cae.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java index 2fe4982acc63e..846f920b7b183 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java @@ -40,11 +40,13 @@ import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.IOException; import java.util.List; /** @@ -79,13 +81,13 @@ protected String executor() { } @Override - protected ClusterBlockException checkBlock(ClusterAllocationExplainRequest request, ClusterState state) { - return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + protected ClusterAllocationExplainResponse read(StreamInput in) throws IOException { + return new ClusterAllocationExplainResponse(in); } @Override - protected ClusterAllocationExplainResponse newResponse() { - return new ClusterAllocationExplainResponse(); + protected ClusterBlockException checkBlock(ClusterAllocationExplainRequest request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsAction.java index a2f0c721b5da4..d974457328b03 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsAction.java @@ -19,18 +19,12 @@ package org.elasticsearch.action.admin.cluster.configuration; import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable.Reader; public class AddVotingConfigExclusionsAction extends ActionType { public static final AddVotingConfigExclusionsAction INSTANCE = new AddVotingConfigExclusionsAction(); public static final String NAME = "cluster:admin/voting_config/add_exclusions"; private AddVotingConfigExclusionsAction() { - super(NAME); - } - - @Override - public Reader getResponseReader() { - return AddVotingConfigExclusionsResponse::new; + super(NAME, AddVotingConfigExclusionsResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java index f19a3b05b5bf3..df11435aba1c9 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java @@ -118,11 +118,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsResponse.java index a3f23871b65fb..b6530be9092ae 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsResponse.java @@ -39,11 +39,6 @@ public AddVotingConfigExclusionsResponse(StreamInput in) throws IOException { super(in); } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException {} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsAction.java index 6091800693f49..c7a00c53bf9ea 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsAction.java @@ -19,18 +19,12 @@ package org.elasticsearch.action.admin.cluster.configuration; import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable.Reader; public class ClearVotingConfigExclusionsAction extends ActionType { public static final ClearVotingConfigExclusionsAction INSTANCE = new ClearVotingConfigExclusionsAction(); public static final String NAME = "cluster:admin/voting_config/clear_exclusions"; private ClearVotingConfigExclusionsAction() { - super(NAME); - } - - @Override - public Reader getResponseReader() { - return ClearVotingConfigExclusionsResponse::new; + super(NAME, ClearVotingConfigExclusionsResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsRequest.java index ec92eeab4abfe..83490e64b29f7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsRequest.java @@ -83,11 +83,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsResponse.java index d44c00c7ee1e7..1c36c992eddeb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsResponse.java @@ -38,11 +38,6 @@ public ClearVotingConfigExclusionsResponse(StreamInput in) throws IOException { super(in); } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException {} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java index a6474510e88f3..13270075806c3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java @@ -68,11 +68,6 @@ protected String executor() { return Names.SAME; } - @Override - protected AddVotingConfigExclusionsResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected AddVotingConfigExclusionsResponse read(StreamInput in) throws IOException { return new AddVotingConfigExclusionsResponse(in); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java index 3b441e7ea9db7..78709c3db09ff 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java @@ -62,11 +62,6 @@ protected String executor() { return Names.SAME; } - @Override - protected ClearVotingConfigExclusionsResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected ClearVotingConfigExclusionsResponse read(StreamInput in) throws IOException { return new ClearVotingConfigExclusionsResponse(in); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthAction.java index e8f5ecfaf5b66..7af8da7b95c22 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthAction.java @@ -19,19 +19,14 @@ package org.elasticsearch.action.admin.cluster.health; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class ClusterHealthAction extends StreamableResponseActionType { +public class ClusterHealthAction extends ActionType { public static final 
ClusterHealthAction INSTANCE = new ClusterHealthAction(); public static final String NAME = "cluster:monitor/health"; private ClusterHealthAction() { - super(NAME); - } - - @Override - public ClusterHealthResponse newResponse() { - return new ClusterHealthResponse(); + super(NAME, ClusterHealthResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java index 375d9e271398f..9bea7b68caf0b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java @@ -279,11 +279,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - public enum Level { CLUSTER, INDICES, SHARDS } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java index 54bd5f1b1c374..163c66003f4d7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java @@ -142,7 +142,18 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo private ClusterStateHealth clusterStateHealth; private ClusterHealthStatus clusterHealthStatus; - ClusterHealthResponse() { + public ClusterHealthResponse() {} + + public ClusterHealthResponse(StreamInput in) throws IOException { + super(in); + clusterName = in.readString(); + clusterHealthStatus = ClusterHealthStatus.fromValue(in.readByte()); + clusterStateHealth = new ClusterStateHealth(in); + numberOfPendingTasks = in.readInt(); + timedOut = in.readBoolean(); + numberOfInFlightFetch = in.readInt(); + delayedUnassignedShards= in.readInt(); + taskMaxWaitingTime = in.readTimeValue(); } /** needed for plugins BWC */ @@ -277,22 +288,7 @@ public double getActiveShardsPercent() { } public static ClusterHealthResponse readResponseFrom(StreamInput in) throws IOException { - ClusterHealthResponse response = new ClusterHealthResponse(); - response.readFrom(in); - return response; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - clusterName = in.readString(); - clusterHealthStatus = ClusterHealthStatus.fromValue(in.readByte()); - clusterStateHealth = new ClusterStateHealth(in); - numberOfPendingTasks = in.readInt(); - timedOut = in.readBoolean(); - numberOfInFlightFetch = in.readInt(); - delayedUnassignedShards= in.readInt(); - taskMaxWaitingTime = in.readTimeValue(); + return new ClusterHealthResponse(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java index 08b8730b02419..cb2704e659d95 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -19,6 +19,8 @@ package org.elasticsearch.action.admin.cluster.health; +import 
org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; @@ -37,6 +39,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.index.IndexNotFoundException; @@ -44,10 +47,13 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.IOException; +import java.util.function.Consumer; import java.util.function.Predicate; -public class TransportClusterHealthAction - extends TransportMasterNodeReadAction { +public class TransportClusterHealthAction extends TransportMasterNodeReadAction { + + private static final Logger logger = LogManager.getLogger(TransportClusterHealthAction.class); private final GatewayAllocator gatewayAllocator; @@ -67,140 +73,164 @@ protected String executor() { } @Override - protected ClusterBlockException checkBlock(ClusterHealthRequest request, ClusterState state) { - // we want users to be able to call this even when there are global blocks, just to check the health (are there blocks?) - return null; + protected ClusterHealthResponse read(StreamInput in) throws IOException { + return new ClusterHealthResponse(in); } @Override - protected ClusterHealthResponse newResponse() { - return new ClusterHealthResponse(); + protected ClusterBlockException checkBlock(ClusterHealthRequest request, ClusterState state) { + // we want users to be able to call this even when there are global blocks, just to check the health (are there blocks?) 
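The same pattern repeats across these hunks: deserialization moves out of a mutable readFrom(StreamInput) override into a constructor, so fields can be final, and each ActionType registers that constructor via super(NAME, Response::new) as a Writeable.Reader instead of overriding newResponse() or getResponseReader(). A minimal sketch of the pattern, with hypothetical ExampleResponse/ExampleAction names (the Elasticsearch base classes are the real ones used throughout this change):

import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.ActionType;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;

public class ExampleResponse extends ActionResponse {

    private final String value; // can be final now: no two-phase readFrom() init

    public ExampleResponse(String value) {
        this.value = value;
    }

    // Writeable style: deserialization lives in a StreamInput constructor.
    public ExampleResponse(StreamInput in) throws IOException {
        super(in);
        value = in.readString();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(value); // must mirror the constructor's read order
    }
}

class ExampleAction extends ActionType<ExampleResponse> {

    static final ExampleAction INSTANCE = new ExampleAction();
    static final String NAME = "cluster:monitor/example";

    private ExampleAction() {
        // the constructor reference serves as the Writeable.Reader for responses,
        // replacing the removed newResponse()/getResponseReader() overrides
        super(NAME, ExampleResponse::new);
    }
}

Transport actions then override read(StreamInput) in the same spirit, as TransportClusterAllocationExplainAction and TransportClusterHealthAction do in the hunks around this point.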
+ return null; } @Override - protected void masterOperation(Task task, final ClusterHealthRequest request, final ClusterState unusedState, + protected void masterOperation(final Task task, + final ClusterHealthRequest request, + final ClusterState unusedState, final ActionListener listener) { + + final int waitCount = getWaitCount(request); + if (request.waitForEvents() != null) { - final long endTimeMS = TimeValue.nsecToMSec(System.nanoTime()) + request.timeout().millis(); - if (request.local()) { - clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", - new LocalClusterUpdateTask(request.waitForEvents()) { - @Override - public ClusterTasksResult execute(ClusterState currentState) { - return unchanged(); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - final long timeoutInMillis = Math.max(0, endTimeMS - TimeValue.nsecToMSec(System.nanoTime())); - final TimeValue newTimeout = TimeValue.timeValueMillis(timeoutInMillis); - request.timeout(newTimeout); - executeHealth(request, listener); - } - - @Override - public void onFailure(String source, Exception e) { - logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); - listener.onFailure(e); - } - }); - } else { - clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", - new ClusterStateUpdateTask(request.waitForEvents()) { - @Override - public ClusterState execute(ClusterState currentState) { - return currentState; - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - final long timeoutInMillis = Math.max(0, endTimeMS - TimeValue.nsecToMSec(System.nanoTime())); - final TimeValue newTimeout = TimeValue.timeValueMillis(timeoutInMillis); - request.timeout(newTimeout); - executeHealth(request, listener); - } - - @Override - public void onNoLongerMaster(String source) { - logger.trace("stopped being master while waiting for events with priority [{}]. retrying.", - request.waitForEvents()); - // TransportMasterNodeAction implements the retry logic, which is triggered by passing a NotMasterException - listener.onFailure(new NotMasterException("no longer master. 
source: [" + source + "]")); - } - - @Override - public void onFailure(String source, Exception e) { - logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); - listener.onFailure(e); - } - }); - } + waitForEventsAndExecuteHealth(request, listener, waitCount, threadPool.relativeTimeInMillis() + request.timeout().millis()); } else { - executeHealth(request, listener); + executeHealth(request, clusterService.state(), listener, waitCount, + clusterState -> listener.onResponse(getResponse(request, clusterState, waitCount, false))); } + } + + private void waitForEventsAndExecuteHealth(final ClusterHealthRequest request, + final ActionListener listener, + final int waitCount, + final long endTimeRelativeMillis) { + assert request.waitForEvents() != null; + if (request.local()) { + clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", + new LocalClusterUpdateTask(request.waitForEvents()) { + @Override + public ClusterTasksResult execute(ClusterState currentState) { + return unchanged(); + } + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + final long timeoutInMillis = Math.max(0, endTimeRelativeMillis - threadPool.relativeTimeInMillis()); + final TimeValue newTimeout = TimeValue.timeValueMillis(timeoutInMillis); + request.timeout(newTimeout); + executeHealth(request, clusterService.state(), listener, waitCount, + observedState -> waitForEventsAndExecuteHealth(request, listener, waitCount, endTimeRelativeMillis)); + } + + @Override + public void onFailure(String source, Exception e) { + logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); + listener.onFailure(e); + } + }); + } else { + clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", + new ClusterStateUpdateTask(request.waitForEvents()) { + @Override + public ClusterState execute(ClusterState currentState) { + return currentState; + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + final long timeoutInMillis = Math.max(0, endTimeRelativeMillis - threadPool.relativeTimeInMillis()); + final TimeValue newTimeout = TimeValue.timeValueMillis(timeoutInMillis); + request.timeout(newTimeout); + + // we must use the state from the applier service, because if the state-not-recovered block is in place then the + // applier service has a different view of the cluster state from the one supplied here + final ClusterState appliedState = clusterService.state(); + assert newState.stateUUID().equals(appliedState.stateUUID()) + : newState.stateUUID() + " vs " + appliedState.stateUUID(); + executeHealth(request, appliedState, listener, waitCount, + observedState -> waitForEventsAndExecuteHealth(request, listener, waitCount, endTimeRelativeMillis)); + } + + @Override + public void onNoLongerMaster(String source) { + logger.trace("stopped being master while waiting for events with priority [{}]. retrying.", + request.waitForEvents()); + // TransportMasterNodeAction implements the retry logic, which is triggered by passing a NotMasterException + listener.onFailure(new NotMasterException("no longer master. 
source: [" + source + "]")); + } + + @Override + public void onFailure(String source, Exception e) { + logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); + listener.onFailure(e); + } + }); + } } - private void executeHealth(final ClusterHealthRequest request, final ActionListener listener) { - int waitFor = 0; + private void executeHealth(final ClusterHealthRequest request, + final ClusterState currentState, + final ActionListener listener, + final int waitCount, + final Consumer onNewClusterStateAfterDelay) { + + if (request.timeout().millis() == 0) { + listener.onResponse(getResponse(request, currentState, waitCount, true)); + return; + } + + final Predicate validationPredicate = newState -> validateRequest(request, newState, waitCount); + if (validationPredicate.test(currentState)) { + listener.onResponse(getResponse(request, currentState, waitCount, false)); + } else { + final ClusterStateObserver observer + = new ClusterStateObserver(currentState, clusterService, null, logger, threadPool.getThreadContext()); + final ClusterStateObserver.Listener stateListener = new ClusterStateObserver.Listener() { + @Override + public void onNewClusterState(ClusterState newState) { + onNewClusterStateAfterDelay.accept(newState); + } + + @Override + public void onClusterServiceClose() { + listener.onFailure(new IllegalStateException("ClusterService was closed during health call")); + } + + @Override + public void onTimeout(TimeValue timeout) { + listener.onResponse(getResponse(request, observer.setAndGetObservedState(), waitCount, true)); + } + }; + observer.waitForNextChange(stateListener, validationPredicate, request.timeout()); + } + } + + private static int getWaitCount(ClusterHealthRequest request) { + int waitCount = 0; if (request.waitForStatus() != null) { - waitFor++; + waitCount++; } if (request.waitForNoRelocatingShards()) { - waitFor++; + waitCount++; } if (request.waitForNoInitializingShards()) { - waitFor++; + waitCount++; } if (request.waitForActiveShards().equals(ActiveShardCount.NONE) == false) { - waitFor++; + waitCount++; } if (request.waitForNodes().isEmpty() == false) { - waitFor++; + waitCount++; } if (request.indices() != null && request.indices().length > 0) { // check that they actually exists in the meta data - waitFor++; - } - - final ClusterState state = clusterService.state(); - final ClusterStateObserver observer = new ClusterStateObserver(state, clusterService, - null, logger, threadPool.getThreadContext()); - if (request.timeout().millis() == 0) { - listener.onResponse(getResponse(request, state, waitFor, request.timeout().millis() == 0)); - return; - } - final int concreteWaitFor = waitFor; - final Predicate validationPredicate = newState -> validateRequest(request, newState, concreteWaitFor); - - final ClusterStateObserver.Listener stateListener = new ClusterStateObserver.Listener() { - @Override - public void onNewClusterState(ClusterState clusterState) { - listener.onResponse(getResponse(request, clusterState, concreteWaitFor, false)); - } - - @Override - public void onClusterServiceClose() { - listener.onFailure(new IllegalStateException("ClusterService was close during health call")); - } - - @Override - public void onTimeout(TimeValue timeout) { - final ClusterHealthResponse response = getResponse(request, observer.setAndGetObservedState(), concreteWaitFor, true); - listener.onResponse(response); - } - }; - if (validationPredicate.test(state)) { - stateListener.onNewClusterState(state); - } else {
observer.waitForNextChange(stateListener, validationPredicate, request.timeout()); + waitCount++; } + return waitCount; } - private boolean validateRequest(final ClusterHealthRequest request, ClusterState clusterState, final int waitFor) { + private boolean validateRequest(final ClusterHealthRequest request, ClusterState clusterState, final int waitCount) { ClusterHealthResponse response = clusterHealth(request, clusterState, clusterService.getMasterService().numberOfPendingTasks(), gatewayAllocator.getNumberOfInFlightFetch(), clusterService.getMasterService().getMaxTaskWaitTime()); - int readyCounter = prepareResponse(request, response, clusterState, indexNameExpressionResolver); - return readyCounter == waitFor; + return prepareResponse(request, response, clusterState, indexNameExpressionResolver) == waitCount; } private ClusterHealthResponse getResponse(final ClusterHealthRequest request, ClusterState clusterState, @@ -220,7 +250,7 @@ private ClusterHealthResponse getResponse(final ClusterHealthRequest request, Cl } static int prepareResponse(final ClusterHealthRequest request, final ClusterHealthResponse response, - final ClusterState clusterState, final IndexNameExpressionResolver indexNameExpressionResolver) { + final ClusterState clusterState, final IndexNameExpressionResolver indexNameExpressionResolver) { int waitForCounter = 0; if (request.waitForStatus() != null && response.getStatus().value() <= request.waitForStatus().value()) { waitForCounter++; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodeHotThreads.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodeHotThreads.java index 7268f962cee1e..60590d30e9c0c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodeHotThreads.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodeHotThreads.java @@ -30,7 +30,9 @@ public class NodeHotThreads extends BaseNodeResponse { private String hotThreads; - NodeHotThreads() { + NodeHotThreads(StreamInput in) throws IOException { + super(in); + hotThreads = in.readString(); } public NodeHotThreads(DiscoveryNode node, String hotThreads) { @@ -42,18 +44,6 @@ public String getHotThreads() { return this.hotThreads; } - public static NodeHotThreads readNodeHotThreads(StreamInput in) throws IOException { - NodeHotThreads node = new NodeHotThreads(); - node.readFrom(in); - return node; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - hotThreads = in.readString(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsAction.java index 4833625d29522..525dc94449cc8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsAction.java @@ -19,19 +19,14 @@ package org.elasticsearch.action.admin.cluster.node.hotthreads; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class NodesHotThreadsAction extends StreamableResponseActionType { +public class NodesHotThreadsAction extends ActionType { public static final NodesHotThreadsAction INSTANCE = 
new NodesHotThreadsAction(); public static final String NAME = "cluster:monitor/nodes/hot_threads"; private NodesHotThreadsAction() { - super(NAME); - } - - @Override - public NodesHotThreadsResponse newResponse() { - return new NodesHotThreadsResponse(); + super(NAME, NodesHotThreadsResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java index e4bd5b9128ec5..8eb732299596a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java @@ -36,8 +36,13 @@ public class NodesHotThreadsRequest extends BaseNodesRequest { - NodesHotThreadsResponse() { + public NodesHotThreadsResponse(StreamInput in) throws IOException { + super(in); } public NodesHotThreadsResponse(ClusterName clusterName, List nodes, List failures) { @@ -39,12 +40,12 @@ public NodesHotThreadsResponse(ClusterName clusterName, List nod @Override protected List readNodesFrom(StreamInput in) throws IOException { - return in.readList(NodeHotThreads::readNodeHotThreads); + return in.readList(NodeHotThreads::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeStreamableList(nodes); + out.writeList(nodes); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java index 4f85177b6e671..8f35b505cdc80 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java @@ -60,8 +60,8 @@ protected NodeRequest newNodeRequest(NodesHotThreadsRequest request) { } @Override - protected NodeHotThreads newNodeResponse() { - return new NodeHotThreads(); + protected NodeHotThreads newNodeResponse(StreamInput in) throws IOException { + return new NodeHotThreads(in); } @Override @@ -83,20 +83,15 @@ public static class NodeRequest extends BaseNodeRequest { NodesHotThreadsRequest request; - public NodeRequest() { + public NodeRequest(StreamInput in) throws IOException { + super(in); + request = new NodesHotThreadsRequest(in); } NodeRequest(NodesHotThreadsRequest request) { this.request = request; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - request = new NodesHotThreadsRequest(); - request.readFrom(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java index 0ab12fe6c0912..1a1bd4e0466a3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java @@ -76,7 +76,26 @@ public class NodeInfo extends BaseNodeResponse { @Nullable private ByteSizeValue totalIndexingBuffer; - public NodeInfo() { + public NodeInfo(StreamInput in) throws IOException { + super(in); + version = Version.readVersion(in); + build = 
Build.readBuild(in); + if (in.readBoolean()) { + totalIndexingBuffer = new ByteSizeValue(in.readLong()); + } else { + totalIndexingBuffer = null; + } + if (in.readBoolean()) { + settings = Settings.readSettingsFromStream(in); + } + os = in.readOptionalWriteable(OsInfo::new); + process = in.readOptionalWriteable(ProcessInfo::new); + jvm = in.readOptionalWriteable(JvmInfo::new); + threadPool = in.readOptionalWriteable(ThreadPoolInfo::new); + transport = in.readOptionalWriteable(TransportInfo::new); + http = in.readOptionalWriteable(HttpInfo::new); + plugins = in.readOptionalWriteable(PluginsAndModules::new); + ingest = in.readOptionalWriteable(IngestInfo::new); } public NodeInfo(Version version, Build build, DiscoveryNode node, @Nullable Settings settings, @@ -182,35 +201,6 @@ public ByteSizeValue getTotalIndexingBuffer() { return totalIndexingBuffer; } - public static NodeInfo readNodeInfo(StreamInput in) throws IOException { - NodeInfo nodeInfo = new NodeInfo(); - nodeInfo.readFrom(in); - return nodeInfo; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - version = Version.readVersion(in); - build = Build.readBuild(in); - if (in.readBoolean()) { - totalIndexingBuffer = new ByteSizeValue(in.readLong()); - } else { - totalIndexingBuffer = null; - } - if (in.readBoolean()) { - settings = Settings.readSettingsFromStream(in); - } - os = in.readOptionalWriteable(OsInfo::new); - process = in.readOptionalWriteable(ProcessInfo::new); - jvm = in.readOptionalWriteable(JvmInfo::new); - threadPool = in.readOptionalWriteable(ThreadPoolInfo::new); - transport = in.readOptionalWriteable(TransportInfo::new); - http = in.readOptionalWriteable(HttpInfo::new); - plugins = in.readOptionalWriteable(PluginsAndModules::new); - ingest = in.readOptionalWriteable(IngestInfo::new); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoAction.java index e94390d8f92d8..36c6f548840e4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoAction.java @@ -19,19 +19,14 @@ package org.elasticsearch.action.admin.cluster.node.info; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class NodesInfoAction extends StreamableResponseActionType { +public class NodesInfoAction extends ActionType { public static final NodesInfoAction INSTANCE = new NodesInfoAction(); public static final String NAME = "cluster:monitor/nodes/info"; private NodesInfoAction() { - super(NAME); - } - - @Override - public NodesInfoResponse newResponse() { - return new NodesInfoResponse(); + super(NAME, NodesInfoResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java index b547d1d743272..80473246e952e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java @@ -41,7 +41,18 @@ public class NodesInfoRequest extends BaseNodesRequest { private boolean ingest = true; private boolean indices = 
true; - public NodesInfoRequest() { + public NodesInfoRequest(StreamInput in) throws IOException { + super(in); + settings = in.readBoolean(); + os = in.readBoolean(); + process = in.readBoolean(); + jvm = in.readBoolean(); + threadPool = in.readBoolean(); + transport = in.readBoolean(); + http = in.readBoolean(); + plugins = in.readBoolean(); + ingest = in.readBoolean(); + indices = in.readBoolean(); } /** @@ -240,21 +251,6 @@ public boolean indices() { return indices; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - settings = in.readBoolean(); - os = in.readBoolean(); - process = in.readBoolean(); - jvm = in.readBoolean(); - threadPool = in.readBoolean(); - transport = in.readBoolean(); - http = in.readBoolean(); - plugins = in.readBoolean(); - ingest = in.readBoolean(); - indices = in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoResponse.java index 4d70b97ca2cf6..30cb93f0f68d8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoResponse.java @@ -37,7 +37,8 @@ public class NodesInfoResponse extends BaseNodesResponse implements ToXContentFragment { - public NodesInfoResponse() { + public NodesInfoResponse(StreamInput in) throws IOException { + super(in); } public NodesInfoResponse(ClusterName clusterName, List nodes, List failures) { @@ -46,12 +47,12 @@ public NodesInfoResponse(ClusterName clusterName, List nodes, List readNodesFrom(StreamInput in) throws IOException { - return in.readList(NodeInfo::readNodeInfo); + return in.readList(NodeInfo::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeStreamableList(nodes); + out.writeList(nodes); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java index 903f6adb7b931..cd960d75da3a4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java @@ -62,8 +62,8 @@ protected NodeInfoRequest newNodeRequest(NodesInfoRequest request) { } @Override - protected NodeInfo newNodeResponse() { - return new NodeInfo(); + protected NodeInfo newNodeResponse(StreamInput in) throws IOException { + return new NodeInfo(in); } @Override @@ -77,20 +77,15 @@ public static class NodeInfoRequest extends BaseNodeRequest { NodesInfoRequest request; - public NodeInfoRequest() { + public NodeInfoRequest(StreamInput in) throws IOException { + super(in); + request = new NodesInfoRequest(in); } public NodeInfoRequest(NodesInfoRequest request) { this.request = request; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - request = new NodesInfoRequest(); - request.readFrom(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/liveness/LivenessResponse.java 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/liveness/LivenessResponse.java deleted file mode 100644 index 7cb8664af984b..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/liveness/LivenessResponse.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.cluster.node.liveness; - -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; - -/** - * Transport level private response for the transport handler registered under - * {@value org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction#NAME} - */ -public final class LivenessResponse extends ActionResponse { - - private DiscoveryNode node; - private ClusterName clusterName; - - public LivenessResponse() { - } - - public LivenessResponse(ClusterName clusterName, DiscoveryNode node) { - this.node = node; - this.clusterName = clusterName; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - clusterName = new ClusterName(in); - node = in.readOptionalWriteable(DiscoveryNode::new); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - clusterName.writeTo(out); - out.writeOptionalWriteable(node); - } - - public ClusterName getClusterName() { - return clusterName; - } - - public DiscoveryNode getDiscoveryNode() { - return node; - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/liveness/TransportLivenessAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/liveness/TransportLivenessAction.java deleted file mode 100644 index ef8014cade4dc..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/liveness/TransportLivenessAction.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.cluster.node.liveness; - -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportRequestHandler; -import org.elasticsearch.transport.TransportService; - -public final class TransportLivenessAction implements TransportRequestHandler { - - private final ClusterService clusterService; - public static final String NAME = "cluster:monitor/nodes/liveness"; - - @Inject - public TransportLivenessAction(ClusterService clusterService, TransportService transportService) { - this.clusterService = clusterService; - transportService.registerRequestHandler(NAME, LivenessRequest::new, ThreadPool.Names.SAME, - false, false /*can not trip circuit breaker*/, this); - } - - @Override - public void messageReceived(LivenessRequest request, TransportChannel channel, Task task) throws Exception { - channel.sendResponse(new LivenessResponse(clusterService.getClusterName(), clusterService.localNode())); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java index e22595c187092..a995ce230f009 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java @@ -19,21 +19,14 @@ package org.elasticsearch.action.admin.cluster.node.reload; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class NodesReloadSecureSettingsAction - extends StreamableResponseActionType { +public class NodesReloadSecureSettingsAction extends ActionType { public static final NodesReloadSecureSettingsAction INSTANCE = new NodesReloadSecureSettingsAction(); public static final String NAME = "cluster:admin/nodes/reload_secure_settings"; private NodesReloadSecureSettingsAction() { - super(NAME); + super(NAME, NodesReloadSecureSettingsResponse::new); } - - @Override - public NodesReloadSecureSettingsResponse newResponse() { - return new NodesReloadSecureSettingsResponse(); - } - } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java index fb3e6ac71adf3..1a2c3c913ac15 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java @@ -19,22 +19,97 @@ package org.elasticsearch.action.admin.cluster.node.reload; +import org.elasticsearch.Version; import org.elasticsearch.action.support.nodes.BaseNodesRequest; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; + +import org.elasticsearch.common.CharArrays; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import 
org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.SecureString; + +import java.util.Arrays; /** - * Request for a reload secure settings action. + * Request for a reload secure settings action */ public class NodesReloadSecureSettingsRequest extends BaseNodesRequest { + /** + * The password is used to re-read and decrypt the contents + * of the node's keystore (backing the implementation of + * {@code SecureSettings}). + */ + @Nullable + private SecureString secureSettingsPassword; + public NodesReloadSecureSettingsRequest() { + super((String[]) null); + } + + public NodesReloadSecureSettingsRequest(StreamInput in) throws IOException { + super(in); + if (in.getVersion().onOrAfter(Version.V_7_4_0)) { + final BytesReference bytesRef = in.readOptionalBytesReference(); + if (bytesRef != null) { + byte[] bytes = BytesReference.toBytes(bytesRef); + try { + this.secureSettingsPassword = new SecureString(CharArrays.utf8BytesToChars(bytes)); + } finally { + Arrays.fill(bytes, (byte) 0); + } + } else { + this.secureSettingsPassword = null; + } + } } /** - * Reload secure settings only on certain nodes, based on the nodes IDs specified. If none are passed, secure settings will be reloaded - * on all the nodes. + * Reload secure settings only on certain nodes, based on the nodes ids + * specified. If none are passed, secure settings will be reloaded on all the + * nodes. */ - public NodesReloadSecureSettingsRequest(final String... nodesIds) { + public NodesReloadSecureSettingsRequest(String... nodesIds) { super(nodesIds); } + @Nullable + public SecureString getSecureSettingsPassword() { + return secureSettingsPassword; + } + + public void setSecureStorePassword(SecureString secureStorePassword) { + this.secureSettingsPassword = secureStorePassword; + } + + public void closePassword() { + if (this.secureSettingsPassword != null) { + this.secureSettingsPassword.close(); + } + } + + boolean hasPassword() { + return this.secureSettingsPassword != null && this.secureSettingsPassword.length() > 0; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + if (out.getVersion().onOrAfter(Version.V_7_4_0)) { + if (this.secureSettingsPassword == null) { + out.writeOptionalBytesReference(null); + } else { + final byte[] passwordBytes = CharArrays.toUtf8Bytes(this.secureSettingsPassword.getChars()); + try { + out.writeOptionalBytesReference(new BytesArray(passwordBytes)); + } finally { + Arrays.fill(passwordBytes, (byte) 0); + } + } + } + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java index c8250455e6ba3..c3c0401efdf17 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java @@ -21,6 +21,7 @@ import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.settings.SecureString; /** * Builder for the reload secure settings nodes request @@ -32,4 +33,9 @@ public NodesReloadSecureSettingsRequestBuilder(ElasticsearchClient client, Nodes super(client, action, new NodesReloadSecureSettingsRequest()); } + public 
NodesReloadSecureSettingsRequestBuilder setSecureStorePassword(SecureString secureStorePassword) { + request.setSecureStorePassword(secureStorePassword); + return this; + } + } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsResponse.java index 394b1f10dc2d9..ce278589a4408 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsResponse.java @@ -40,7 +40,8 @@ public class NodesReloadSecureSettingsResponse extends BaseNodesResponse implements ToXContentFragment { - public NodesReloadSecureSettingsResponse() { + public NodesReloadSecureSettingsResponse(StreamInput in) throws IOException { + super(in); } public NodesReloadSecureSettingsResponse(ClusterName clusterName, List nodes, List failures) { @@ -49,12 +50,12 @@ public NodesReloadSecureSettingsResponse(ClusterName clusterName, List readNodesFrom(StreamInput in) throws IOException { - return in.readList(NodeResponse::readNodeResponse); + return in.readList(NodeResponse::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeStreamableList(nodes); + out.writeList(nodes); } @Override @@ -92,7 +93,11 @@ public static class NodeResponse extends BaseNodeResponse { private Exception reloadException = null; - public NodeResponse() { + public NodeResponse(StreamInput in) throws IOException { + super(in); + if (in.readBoolean()) { + reloadException = in.readException(); + } } public NodeResponse(DiscoveryNode node, Exception reloadException) { @@ -104,14 +109,6 @@ public Exception reloadException() { return this.reloadException; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - if (in.readBoolean()) { - reloadException = in.readException(); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -139,11 +136,5 @@ public boolean equals(Object o) { public int hashCode() { return reloadException != null ? 
reloadException.hashCode() : 0; } - - public static NodeResponse readNodeResponse(StreamInput in) throws IOException { - final NodeResponse node = new NodeResponse(); - node.readFrom(in); - return node; - } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java index 27860b52557e7..21c4e4a4336c4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java @@ -21,16 +21,20 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.nodes.BaseNodeRequest; import org.elasticsearch.action.support.nodes.TransportNodesAction; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.KeyStoreWrapper; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.PluginsService; @@ -74,19 +78,43 @@ protected NodeRequest newNodeRequest(NodesReloadSecureSettingsRequest request) { } @Override - protected NodesReloadSecureSettingsResponse.NodeResponse newNodeResponse() { - return new NodesReloadSecureSettingsResponse.NodeResponse(); + protected NodesReloadSecureSettingsResponse.NodeResponse newNodeResponse(StreamInput in) throws IOException { + return new NodesReloadSecureSettingsResponse.NodeResponse(in); + } + + @Override + protected void doExecute(Task task, NodesReloadSecureSettingsRequest request, + ActionListener listener) { + if (request.hasPassword() && isNodeLocal(request) == false && isNodeTransportTLSEnabled() == false) { + request.closePassword(); + listener.onFailure( + new ElasticsearchException("Secure settings cannot be updated cluster wide when TLS for the transport layer" + + " is not enabled. Enable TLS or use the API with a `_local` filter on each node.")); + } else { + super.doExecute(task, request, ActionListener.wrap(response -> { + request.closePassword(); + listener.onResponse(response); + }, e -> { + request.closePassword(); + listener.onFailure(e); + })); + } } @Override protected NodesReloadSecureSettingsResponse.NodeResponse nodeOperation(NodeRequest nodeReloadRequest, Task task) { + final NodesReloadSecureSettingsRequest request = nodeReloadRequest.request; + // We default to using an empty string as the keystore password so that we mimic pre 7.3 API behavior + final SecureString secureSettingsPassword = request.hasPassword() ? 
request.getSecureSettingsPassword() : + new SecureString(new char[0]); try (KeyStoreWrapper keystore = KeyStoreWrapper.load(environment.configFile())) { // reread keystore from config file if (keystore == null) { return new NodesReloadSecureSettingsResponse.NodeResponse(clusterService.localNode(), new IllegalStateException("Keystore is missing")); } - keystore.decrypt(new char[0]); + // decrypt the keystore using the password from the request + keystore.decrypt(secureSettingsPassword.getChars()); // add the keystore to the original node settings object final Settings settingsWithKeystore = Settings.builder() .put(environment.settings(), false) @@ -107,6 +135,8 @@ protected NodesReloadSecureSettingsResponse.NodeResponse nodeOperation(NodeReque return new NodesReloadSecureSettingsResponse.NodeResponse(clusterService.localNode(), null); } catch (final Exception e) { return new NodesReloadSecureSettingsResponse.NodeResponse(clusterService.localNode(), e); + } finally { + secureSettingsPassword.close(); } } @@ -114,24 +144,35 @@ public static class NodeRequest extends BaseNodeRequest { NodesReloadSecureSettingsRequest request; - public NodeRequest() { + public NodeRequest(StreamInput in) throws IOException { + super(in); + request = new NodesReloadSecureSettingsRequest(in); } NodeRequest(NodesReloadSecureSettingsRequest request) { this.request = request; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - request = new NodesReloadSecureSettingsRequest(); - request.readFrom(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); request.writeTo(out); } } + + /** + * Returns true if the node is configured for TLS on the transport layer + */ + private boolean isNodeTransportTLSEnabled() { + return transportService.isTransportSecure(); + } + + private boolean isNodeLocal(NodesReloadSecureSettingsRequest request) { + if (null == request.concreteNodes()) { + resolveRequest(request, clusterService.state()); + assert request.concreteNodes() != null; + } + final DiscoveryNode[] nodes = request.concreteNodes(); + return nodes.length == 1 && nodes[0].getId().equals(clusterService.localNode().getId()); + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java index 1d32fbef2c557..60f1fc4da1063 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java @@ -90,7 +90,24 @@ public class NodeStats extends BaseNodeResponse implements ToXContentFragment { @Nullable private AdaptiveSelectionStats adaptiveSelectionStats; - NodeStats() { + public NodeStats(StreamInput in) throws IOException { + super(in); + timestamp = in.readVLong(); + if (in.readBoolean()) { + indices = new NodeIndicesStats(in); + } + os = in.readOptionalWriteable(OsStats::new); + process = in.readOptionalWriteable(ProcessStats::new); + jvm = in.readOptionalWriteable(JvmStats::new); + threadPool = in.readOptionalWriteable(ThreadPoolStats::new); + fs = in.readOptionalWriteable(FsInfo::new); + transport = in.readOptionalWriteable(TransportStats::new); + http = in.readOptionalWriteable(HttpStats::new); + breaker = in.readOptionalWriteable(AllCircuitBreakerStats::new); + scriptStats = in.readOptionalWriteable(ScriptStats::new); + discoveryStats = in.readOptionalWriteable(DiscoveryStats::new); + 
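The password handling above pairs a version gate with defensive zeroing: the secure string only goes on the wire to 7.4.0+ nodes, travels as an optional UTF-8 byte blob, and each intermediate byte[] is wiped in a finally block. A condensed sketch of that round trip (PasswordWireFormat is a hypothetical holder; the stream and CharArrays calls are the ones used in the request above):

import org.elasticsearch.Version;
import org.elasticsearch.common.CharArrays;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.SecureString;

import java.io.IOException;
import java.util.Arrays;

class PasswordWireFormat {

    // write side: only nodes on 7.4.0 or later understand the extra field
    static void writePassword(StreamOutput out, SecureString password) throws IOException {
        if (out.getVersion().onOrAfter(Version.V_7_4_0)) {
            if (password == null) {
                out.writeOptionalBytesReference(null);
            } else {
                final byte[] bytes = CharArrays.toUtf8Bytes(password.getChars());
                try {
                    out.writeOptionalBytesReference(new BytesArray(bytes));
                } finally {
                    Arrays.fill(bytes, (byte) 0); // don't leave the password in the heap
                }
            }
        }
    }

    // read side mirrors both the version gate and the zeroing
    static SecureString readPassword(StreamInput in) throws IOException {
        if (in.getVersion().onOrAfter(Version.V_7_4_0) == false) {
            return null;
        }
        final BytesReference ref = in.readOptionalBytesReference();
        if (ref == null) {
            return null;
        }
        final byte[] bytes = BytesReference.toBytes(ref);
        try {
            return new SecureString(CharArrays.utf8BytesToChars(bytes));
        } finally {
            Arrays.fill(bytes, (byte) 0);
        }
    }
}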
ingestStats = in.readOptionalWriteable(IngestStats::new); + adaptiveSelectionStats = in.readOptionalWriteable(AdaptiveSelectionStats::new); } public NodeStats(DiscoveryNode node, long timestamp, @Nullable NodeIndicesStats indices, @@ -210,33 +227,6 @@ public AdaptiveSelectionStats getAdaptiveSelectionStats() { return adaptiveSelectionStats; } - public static NodeStats readNodeStats(StreamInput in) throws IOException { - NodeStats nodeInfo = new NodeStats(); - nodeInfo.readFrom(in); - return nodeInfo; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - timestamp = in.readVLong(); - if (in.readBoolean()) { - indices = NodeIndicesStats.readIndicesStats(in); - } - os = in.readOptionalWriteable(OsStats::new); - process = in.readOptionalWriteable(ProcessStats::new); - jvm = in.readOptionalWriteable(JvmStats::new); - threadPool = in.readOptionalWriteable(ThreadPoolStats::new); - fs = in.readOptionalWriteable(FsInfo::new); - transport = in.readOptionalWriteable(TransportStats::new); - http = in.readOptionalWriteable(HttpStats::new); - breaker = in.readOptionalWriteable(AllCircuitBreakerStats::new); - scriptStats = in.readOptionalWriteable(ScriptStats::new); - discoveryStats = in.readOptionalWriteable(DiscoveryStats::new); - ingestStats = in.readOptionalWriteable(IngestStats::new); - adaptiveSelectionStats = in.readOptionalWriteable(AdaptiveSelectionStats::new); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsAction.java index dbe7deed74a73..cbd2bf20433c7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsAction.java @@ -19,19 +19,14 @@ package org.elasticsearch.action.admin.cluster.node.stats; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class NodesStatsAction extends StreamableResponseActionType { +public class NodesStatsAction extends ActionType { public static final NodesStatsAction INSTANCE = new NodesStatsAction(); public static final String NAME = "cluster:monitor/nodes/stats"; private NodesStatsAction() { - super(NAME); - } - - @Override - public NodesStatsResponse newResponse() { - return new NodesStatsResponse(); + super(NAME, NodesStatsResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java index 9cd3ff82c93da..205404a564aea 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java @@ -46,6 +46,24 @@ public class NodesStatsRequest extends BaseNodesRequest { private boolean adaptiveSelection; public NodesStatsRequest() { + super((String[]) null); + } + + public NodesStatsRequest(StreamInput in) throws IOException { + super(in); + indices = new CommonStatsFlags(in); + os = in.readBoolean(); + process = in.readBoolean(); + jvm = in.readBoolean(); + threadPool = in.readBoolean(); + fs = in.readBoolean(); + transport = in.readBoolean(); + http = in.readBoolean(); + breaker = in.readBoolean(); 
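The NodeStats constructor just above leans on the optional-writeable helpers for its many nullable sections: readOptionalWriteable consumes a presence boolean and then the value, and writeOptionalWriteable emits the matching pair. A tiny sketch with hypothetical ExampleStats/ExampleSection names:

import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;

class ExampleSection implements Writeable {

    final long value;

    ExampleSection(StreamInput in) throws IOException {
        value = in.readVLong();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeVLong(value);
    }
}

class ExampleStats implements Writeable {

    @Nullable
    private final ExampleSection section;

    ExampleStats(StreamInput in) throws IOException {
        // reads a presence flag, then ExampleSection(in) only when present
        section = in.readOptionalWriteable(ExampleSection::new);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        // writes the matching flag, plus the value when non-null
        out.writeOptionalWriteable(section);
    }
}

The list-level change in the nodes responses is analogous: readNodesFrom now uses in.readList(NodeStats::new) and writeNodesTo uses out.writeList(nodes), since each node response is a plain Writeable.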
+ script = in.readBoolean(); + discovery = in.readBoolean(); + ingest = in.readBoolean(); + adaptiveSelection = in.readBoolean(); } /** @@ -280,24 +298,6 @@ public NodesStatsRequest adaptiveSelection(boolean adaptiveSelection) { return this; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - indices = new CommonStatsFlags(in); - os = in.readBoolean(); - process = in.readBoolean(); - jvm = in.readBoolean(); - threadPool = in.readBoolean(); - fs = in.readBoolean(); - transport = in.readBoolean(); - http = in.readBoolean(); - breaker = in.readBoolean(); - script = in.readBoolean(); - discovery = in.readBoolean(); - ingest = in.readBoolean(); - adaptiveSelection = in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsResponse.java index 78b33021a4b5c..97fdcad1ba2db 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsResponse.java @@ -34,7 +34,8 @@ public class NodesStatsResponse extends BaseNodesResponse implements ToXContentFragment { - NodesStatsResponse() { + public NodesStatsResponse(StreamInput in) throws IOException { + super(in); } public NodesStatsResponse(ClusterName clusterName, List nodes, List failures) { @@ -43,12 +44,12 @@ public NodesStatsResponse(ClusterName clusterName, List nodes, List readNodesFrom(StreamInput in) throws IOException { - return in.readList(NodeStats::readNodeStats); + return in.readList(NodeStats::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeStreamableList(nodes); + out.writeList(nodes); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index f399304a8a10e..06900e435a570 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -61,8 +61,8 @@ protected NodeStatsRequest newNodeRequest(NodesStatsRequest request) { } @Override - protected NodeStats newNodeResponse() { - return new NodeStats(); + protected NodeStats newNodeResponse(StreamInput in) throws IOException { + return new NodeStats(in); } @Override @@ -77,20 +77,15 @@ public static class NodeStatsRequest extends BaseNodeRequest { NodesStatsRequest request; - public NodeStatsRequest() { + public NodeStatsRequest(StreamInput in) throws IOException { + super(in); + request = new NodesStatsRequest(in); } NodeStatsRequest(NodesStatsRequest request) { this.request = request; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - request = new NodesStatsRequest(); - request.readFrom(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksAction.java index a0fa139dc7364..d198102c6591c 
100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.cluster.node.tasks.cancel; import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable; /** * ActionType for cancelling running tasks */ @@ -31,11 +30,7 @@ public class CancelTasksAction extends ActionType<CancelTasksResponse> { public static final String NAME = "cluster:admin/tasks/cancel"; private CancelTasksAction() { - super(NAME); - } - - @Override - public Writeable.Reader<CancelTasksResponse> getResponseReader() { - return CancelTasksResponse::new; + super(NAME, CancelTasksResponse::new); } } + diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskAction.java index 978e07555b517..4a689a664fa91 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskAction.java @@ -19,23 +19,18 @@ package org.elasticsearch.action.admin.cluster.node.tasks.get; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * ActionType for retrieving a single currently running task */ -public class GetTaskAction extends StreamableResponseActionType<GetTaskResponse> { +public class GetTaskAction extends ActionType<GetTaskResponse> { public static final String TASKS_ORIGIN = "tasks"; public static final GetTaskAction INSTANCE = new GetTaskAction(); public static final String NAME = "cluster:monitor/task/get"; private GetTaskAction() { - super(NAME); - } - - @Override - public GetTaskResponse newResponse() { - return new GetTaskResponse(); + super(NAME, GetTaskResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java index b8eb33edc82de..4063333cbb6f2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java @@ -41,6 +41,15 @@ public class GetTaskRequest extends ActionRequest { /** * Get the TaskId to look up.
*/ + public GetTaskRequest() {} + + public GetTaskRequest(StreamInput in) throws IOException { + super(in); + taskId = TaskId.readFromStream(in); + timeout = in.readOptionalTimeValue(); + waitForCompletion = in.readBoolean(); + } + public TaskId getTaskId() { return taskId; } @@ -101,14 +110,6 @@ public ActionRequestValidationException validate() { return validationException; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - taskId = TaskId.readFromStream(in); - timeout = in.readOptionalTimeValue(); - waitForCompletion = in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskResponse.java index b1369c33f7181..3789f9827b1d9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskResponse.java @@ -35,18 +35,15 @@ * Returns the result of the requested task */ public class GetTaskResponse extends ActionResponse implements ToXContentObject { - private TaskResult task; - public GetTaskResponse() { - } + private final TaskResult task; public GetTaskResponse(TaskResult task) { this.task = requireNonNull(task, "task is required"); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); + public GetTaskResponse(StreamInput in) throws IOException { + super(in); task = in.readOptionalWriteable(TaskResult::new); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java index 2b0ac0233be29..0e3df01db525a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java @@ -121,9 +121,7 @@ private void runOnNodeWithTaskIfPossible(Task thisTask, GetTaskRequest request, new TransportResponseHandler<GetTaskResponse>() { @Override public GetTaskResponse read(StreamInput in) throws IOException { - GetTaskResponse response = new GetTaskResponse(); - response.readFrom(in); - return response; + return new GetTaskResponse(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksAction.java index 7fb0b94cd8766..cc10d98c6bd36 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.cluster.node.tasks.list; import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable; /** * ActionType for retrieving a list of currently running tasks */ @@ -31,11 +30,7 @@ public class ListTasksAction extends ActionType<ListTasksResponse> { public static final String NAME = "cluster:monitor/tasks/lists"; private ListTasksAction() { - super(NAME); + super(NAME, ListTasksResponse::new); } - @Override - public Writeable.Reader<ListTasksResponse> getResponseReader() { - return
ListTasksResponse::new; - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodeUsage.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodeUsage.java index d963a6d5e3989..cd5aeef64313c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodeUsage.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodeUsage.java @@ -23,7 +23,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ToXContent.Params; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -36,13 +35,11 @@ public class NodeUsage extends BaseNodeResponse implements ToXContentFragment { private long sinceTime; private Map<String, Long> restUsage; - NodeUsage() { - } - - public static NodeUsage readNodeStats(StreamInput in) throws IOException { - NodeUsage nodeInfo = new NodeUsage(); - nodeInfo.readFrom(in); - return nodeInfo; + public NodeUsage(StreamInput in) throws IOException { + super(in); + timestamp = in.readLong(); + sinceTime = in.readLong(); + restUsage = (Map<String, Long>) in.readGenericValue(); } /** @@ -96,15 +93,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - @SuppressWarnings("unchecked") - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - timestamp = in.readLong(); - sinceTime = in.readLong(); - restUsage = (Map<String, Long>) in.readGenericValue(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodesUsageAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodesUsageAction.java index aaf7616a19b1b..5e7ca5c7dec97 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodesUsageAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodesUsageAction.java @@ -19,20 +19,14 @@ package org.elasticsearch.action.admin.cluster.node.usage; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class NodesUsageAction extends StreamableResponseActionType<NodesUsageResponse> { +public class NodesUsageAction extends ActionType<NodesUsageResponse> { public static final NodesUsageAction INSTANCE = new NodesUsageAction(); public static final String NAME = "cluster:monitor/nodes/usage"; protected NodesUsageAction() { - super(NAME); + super(NAME, NodesUsageResponse::new); } - - @Override - public NodesUsageResponse newResponse() { - return new NodesUsageResponse(); - } - } \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodesUsageRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodesUsageRequest.java index c4e80494aed5c..1a8b7926fe987 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodesUsageRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodesUsageRequest.java @@ -29,8 +29,9 @@ public class NodesUsageRequest extends BaseNodesRequest<NodesUsageRequest> { private boolean restActions; - public NodesUsageRequest() { - super(); + public NodesUsageRequest(StreamInput in) throws IOException { + super(in); + this.restActions =
in.readBoolean(); } /** @@ -72,12 +73,6 @@ public NodesUsageRequest restActions(boolean restActions) { return this; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - this.restActions = in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodesUsageResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodesUsageResponse.java index f84ccb738df03..1e9e9e34b4aad 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodesUsageResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodesUsageResponse.java @@ -38,7 +38,8 @@ */ public class NodesUsageResponse extends BaseNodesResponse<NodeUsage> implements ToXContentFragment { - NodesUsageResponse() { + public NodesUsageResponse(StreamInput in) throws IOException { + super(in); } public NodesUsageResponse(ClusterName clusterName, List<NodeUsage> nodes, List<FailedNodeException> failures) { @@ -47,12 +48,12 @@ public NodesUsageResponse(ClusterName clusterName, List<NodeUsage> nodes, List<FailedNodeException> failures) { super(clusterName, nodes, failures); } @Override protected List<NodeUsage> readNodesFrom(StreamInput in) throws IOException { - return in.readList(NodeUsage::readNodeStats); + return in.readList(NodeUsage::new); } @Override protected void writeNodesTo(StreamOutput out, List<NodeUsage> nodes) throws IOException { - out.writeStreamableList(nodes); + out.writeList(nodes); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/TransportNodesUsageAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/TransportNodesUsageAction.java index ed40aaddae24a..853e4c6badec4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/TransportNodesUsageAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/TransportNodesUsageAction.java @@ -59,8 +59,8 @@ protected NodeUsageRequest newNodeRequest(NodesUsageRequest request) { } @Override - protected NodeUsage newNodeResponse() { - return new NodeUsage(); + protected NodeUsage newNodeResponse(StreamInput in) throws IOException { + return new NodeUsage(in); } @Override @@ -73,20 +73,15 @@ public static class NodeUsageRequest extends BaseNodeRequest { NodesUsageRequest request; - public NodeUsageRequest() { + public NodeUsageRequest(StreamInput in) throws IOException { + super(in); + request = new NodesUsageRequest(in); } NodeUsageRequest(NodesUsageRequest request) { this.request = request; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - request = new NodesUsageRequest(); - request.readFrom(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoAction.java index 6ced57bde05c1..9e6befcfd1d05 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoAction.java @@ -19,19 +19,14 @@ package org.elasticsearch.action.admin.cluster.remote; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public final class RemoteInfoAction extends StreamableResponseActionType<RemoteInfoResponse> { +public final class RemoteInfoAction extends
ActionType<RemoteInfoResponse> { public static final String NAME = "cluster:monitor/remote/info"; public static final RemoteInfoAction INSTANCE = new RemoteInfoAction(); public RemoteInfoAction() { - super(NAME); - } - - @Override - public RemoteInfoResponse newResponse() { - return new RemoteInfoResponse(); + super(NAME, RemoteInfoResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoRequest.java index e13c7fc9146a5..95f685614c40e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoRequest.java @@ -27,11 +27,9 @@ public final class RemoteInfoRequest extends ActionRequest { - public RemoteInfoRequest() { + public RemoteInfoRequest() {} - } - - public RemoteInfoRequest(StreamInput in) throws IOException { + RemoteInfoRequest(StreamInput in) throws IOException { super(in); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoResponse.java index 1c4020fa33e25..894cd9cf9fec4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoResponse.java @@ -34,7 +34,9 @@ public final class RemoteInfoResponse extends ActionResponse implements ToXContentObject { private List<RemoteConnectionInfo> infos; - RemoteInfoResponse() { + RemoteInfoResponse(StreamInput in) throws IOException { + super(in); + infos = in.readList(RemoteConnectionInfo::new); } RemoteInfoResponse(Collection<RemoteConnectionInfo> infos) { @@ -50,12 +52,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeList(infos); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - infos = in.readList(RemoteConnectionInfo::new); - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/TransportRemoteInfoAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/TransportRemoteInfoAction.java index 46d6735658646..dd3198a003a32 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/TransportRemoteInfoAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/TransportRemoteInfoAction.java @@ -28,8 +28,6 @@ import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.TransportService; -import java.util.function.Supplier; - import static java.util.stream.Collectors.toList; public final class TransportRemoteInfoAction extends HandledTransportAction<RemoteInfoRequest, RemoteInfoResponse> { @@ -39,8 +37,7 @@ public final class TransportRemoteInfoAction extends HandledTransportAction<RemoteInfoRequest, RemoteInfoResponse> - super(RemoteInfoAction.NAME, transportService, actionFilters, (Supplier<RemoteInfoRequest>) RemoteInfoRequest::new); + super(RemoteInfoAction.NAME, transportService, actionFilters, RemoteInfoRequest::new); this.remoteClusterService = searchTransportService.getRemoteClusterService(); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java index 791e4c6e57d70..26b2ef53fd6f4 100644 ---
a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java @@ -21,7 +21,6 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.common.io.stream.Writeable; /** * Unregister repository action @@ -32,12 +31,8 @@ public class DeleteRepositoryAction extends ActionType { public static final String NAME = "cluster:admin/repository/delete"; private DeleteRepositoryAction() { - super(NAME); + super(NAME, AcknowledgedResponse::new); } - @Override - public Writeable.Reader getResponseReader() { - return AcknowledgedResponse::new; - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java index 26403b3b45d06..5aaa17a3ce90e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java @@ -37,6 +37,11 @@ public class DeleteRepositoryRequest extends AcknowledgedRequest { public static final String NAME = "cluster:admin/repository/get"; private GetRepositoriesAction() { - super(NAME); + super(NAME, GetRepositoriesResponse::new); } - @Override - public Writeable.Reader getResponseReader() { - return GetRepositoriesResponse::new; - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java index 9c2e2b80a6d28..539ba074c1959 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java @@ -93,9 +93,4 @@ public GetRepositoriesRequest repositories(String[] repositories) { this.repositories = repositories; return this; } - - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java index 4fb9cbaebe941..69ade6d8fe0b0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java @@ -58,11 +58,6 @@ public List repositories() { } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { repositories.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java index 3f0ee69d2a5aa..e7ea40c5f53aa 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java @@ -63,11 +63,6 @@ protected String executor() { return ThreadPool.Names.MANAGEMENT; } - @Override - protected GetRepositoriesResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected GetRepositoriesResponse read(StreamInput in) throws IOException { return new GetRepositoriesResponse(in); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryAction.java index 0aa5c6a5d51f9..a0df1fd953b67 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryAction.java @@ -21,7 +21,6 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.common.io.stream.Writeable; /** * Register repository action @@ -32,12 +31,8 @@ public class PutRepositoryAction extends ActionType { public static final String NAME = "cluster:admin/repository/put"; private PutRepositoryAction() { - super(NAME); + super(NAME, AcknowledgedResponse::new); } - @Override - public Writeable.Reader getResponseReader() { - return AcknowledgedResponse::new; - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java index 82f0e38572e77..c9a2c6b751734 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java @@ -55,6 +55,14 @@ public class PutRepositoryRequest extends AcknowledgedRequest repositoryDefinition) { return this; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - name = in.readString(); - type = in.readString(); - settings = readSettingsFromStream(in); - verify = in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java index 21f4c7e6f384d..e073ce4498f7a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java @@ -49,7 +49,7 @@ public TransportPutRepositoryAction(TransportService transportService, ClusterSe RepositoriesService repositoriesService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { super(PutRepositoryAction.NAME, transportService, clusterService, threadPool, actionFilters, - indexNameExpressionResolver, PutRepositoryRequest::new); + PutRepositoryRequest::new, indexNameExpressionResolver); this.repositoriesService = 
repositoriesService; } @@ -63,11 +63,6 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { return new AcknowledgedResponse(in); } - @Override - protected AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected ClusterBlockException checkBlock(PutRepositoryRequest request, ClusterState state) { return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java index 3de71e346f3c4..d0402e648b62e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java @@ -29,15 +29,19 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.IOException; + /** * Transport action for verifying repository operation */ -public class TransportVerifyRepositoryAction extends TransportMasterNodeAction<VerifyRepositoryRequest, VerifyRepositoryResponse> { +public class TransportVerifyRepositoryAction extends + TransportMasterNodeAction<VerifyRepositoryRequest, VerifyRepositoryResponse> { private final RepositoriesService repositoriesService; @@ -47,7 +51,7 @@ public TransportVerifyRepositoryAction(TransportService transportService, ClusterService clusterService, RepositoriesService repositoriesService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { super(VerifyRepositoryAction.NAME, transportService, clusterService, threadPool, actionFilters, - indexNameExpressionResolver, VerifyRepositoryRequest::new); + VerifyRepositoryRequest::new, indexNameExpressionResolver); this.repositoriesService = repositoriesService; } @@ -57,8 +61,8 @@ protected String executor() { } @Override - protected VerifyRepositoryResponse newResponse() { - return new VerifyRepositoryResponse(); + protected VerifyRepositoryResponse read(StreamInput in) throws IOException { + return new VerifyRepositoryResponse(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryAction.java index 9601ceecf0dcd..21a573e85803d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryAction.java @@ -19,23 +19,18 @@ package org.elasticsearch.action.admin.cluster.repositories.verify; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * Verify repository action */ -public class VerifyRepositoryAction extends StreamableResponseActionType<VerifyRepositoryResponse> { +public class VerifyRepositoryAction extends ActionType<VerifyRepositoryResponse> { public
static final String NAME = "cluster:admin/repository/verify"; private VerifyRepositoryAction() { - super(NAME); - } - - @Override - public VerifyRepositoryResponse newResponse() { - return new VerifyRepositoryResponse(); + super(NAME, VerifyRepositoryResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java index 0d0384591938e..32eaf16ee6e61 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java @@ -37,6 +37,11 @@ public class VerifyRepositoryRequest extends AcknowledgedRequest nodes) { this.nodes = nodes; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - this.nodes = in.readList(NodeView::new); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeList(nodes); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteAction.java index 82577a58cace7..35fb26595fd08 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.cluster.reroute; import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable; public class ClusterRerouteAction extends ActionType { @@ -28,11 +27,6 @@ public class ClusterRerouteAction extends ActionType { public static final String NAME = "cluster:admin/reroute"; private ClusterRerouteAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return ClusterRerouteResponse::new; + super(NAME, ClusterRerouteResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequest.java index 5bdde81e6deb6..13e1e94907568 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequest.java @@ -38,6 +38,14 @@ public class ClusterRerouteRequest extends AcknowledgedRequest { @@ -28,11 +27,6 @@ public class ClusterUpdateSettingsAction extends ActionType getResponseReader() { - return ClusterUpdateSettingsResponse::new; + super(NAME, ClusterUpdateSettingsResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java index 7f74ae668e1bd..41ef1e230d744 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java @@ -61,6 +61,12 @@ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest source) { return this; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - 
transientSettings = readSettingsFromStream(in); - persistentSettings = readSettingsFromStream(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index ec55a21bac4bf..777b2f4ca9d60 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -56,7 +56,7 @@ public TransportClusterUpdateSettingsAction(TransportService transportService, C ThreadPool threadPool, AllocationService allocationService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, ClusterSettings clusterSettings) { super(ClusterUpdateSettingsAction.NAME, false, transportService, clusterService, threadPool, actionFilters, - indexNameExpressionResolver, ClusterUpdateSettingsRequest::new); + ClusterUpdateSettingsRequest::new, indexNameExpressionResolver); this.allocationService = allocationService; this.clusterSettings = clusterSettings; } @@ -82,12 +82,6 @@ protected ClusterBlockException checkBlock(ClusterUpdateSettingsRequest request, return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); } - - @Override - protected ClusterUpdateSettingsResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected ClusterUpdateSettingsResponse read(StreamInput in) throws IOException { return new ClusterUpdateSettingsResponse(in); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsAction.java index 837fb9e39709f..3e10bd5d1a495 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.cluster.shards; import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable; public class ClusterSearchShardsAction extends ActionType { @@ -28,11 +27,6 @@ public class ClusterSearchShardsAction extends ActionType getResponseReader() { - return ClusterSearchShardsResponse::new; + super(NAME, ClusterSearchShardsResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java index 4798aeb67c199..e1ba4b76e4168 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java @@ -148,9 +148,4 @@ public ClusterSearchShardsRequest preference(String preference) { public String preference() { return this.preference; } - - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } } diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java index ce9979064788a..4209a36c934b7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java @@ -57,11 +57,6 @@ public ClusterSearchShardsResponse(StreamInput in) throws IOException { } } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(groups.length); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java index 23b9f34a241c3..f173509207013 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java @@ -47,7 +47,7 @@ import java.util.Set; public class TransportClusterSearchShardsAction extends - TransportMasterNodeReadAction { + TransportMasterNodeReadAction { private final IndicesService indicesService; @@ -72,11 +72,6 @@ protected ClusterBlockException checkBlock(ClusterSearchShardsRequest request, C indexNameExpressionResolver.concreteIndexNames(state, request)); } - @Override - protected ClusterSearchShardsResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected ClusterSearchShardsResponse read(StreamInput in) throws IOException { return new ClusterSearchShardsResponse(in); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotAction.java index a7a8e5467c71e..0ca295b12e21f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotAction.java @@ -19,23 +19,18 @@ package org.elasticsearch.action.admin.cluster.snapshots.create; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * Create snapshot action */ -public class CreateSnapshotAction extends StreamableResponseActionType { +public class CreateSnapshotAction extends ActionType { public static final CreateSnapshotAction INSTANCE = new CreateSnapshotAction(); public static final String NAME = "cluster:admin/snapshot/create"; private CreateSnapshotAction() { - super(NAME); - } - - @Override - public CreateSnapshotResponse newResponse() { - return new CreateSnapshotResponse(); + super(NAME, CreateSnapshotResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java index e0250da5ed1cb..3df1383e32e6d 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java @@ -36,7 +36,6 @@ import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; -import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Map; @@ -164,7 +163,7 @@ public ActionRequestValidationException validate() { return validationException; } - private static int metadataSize(Map userMetadata) { + public static int metadataSize(Map userMetadata) { if (userMetadata == null) { return 0; } @@ -431,8 +430,8 @@ public CreateSnapshotRequest source(Map source) { if (name.equals("indices")) { if (entry.getValue() instanceof String) { indices(Strings.splitStringByCommaToArray((String) entry.getValue())); - } else if (entry.getValue() instanceof ArrayList) { - indices((ArrayList) entry.getValue()); + } else if (entry.getValue() instanceof List) { + indices((List) entry.getValue()); } else { throw new IllegalArgumentException("malformed indices section, should be an array of strings"); } @@ -483,11 +482,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public String getDescription() { return "snapshot [" + repository + ":" + snapshot + "]"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java index 97dfe91b03a52..a63f2cf810475 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java @@ -51,11 +51,15 @@ public class CreateSnapshotResponse extends ActionResponse implements ToXContent @Nullable private SnapshotInfo snapshotInfo; + CreateSnapshotResponse() {} + CreateSnapshotResponse(@Nullable SnapshotInfo snapshotInfo) { this.snapshotInfo = snapshotInfo; } - CreateSnapshotResponse() { + public CreateSnapshotResponse(StreamInput in) throws IOException { + super(in); + snapshotInfo = in.readOptionalWriteable(SnapshotInfo::new); } private void setSnapshotInfoFromBuilder(SnapshotInfoBuilder snapshotInfoBuilder) { @@ -71,12 +75,6 @@ public SnapshotInfo getSnapshotInfo() { return snapshotInfo; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - snapshotInfo = in.readOptionalWriteable(SnapshotInfo::new); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(snapshotInfo); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java index a3de0d4f0a975..9e6bc7b013970 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java @@ -28,11 +28,14 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import 
org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.IOException; + /** * Transport action for create snapshot operation */ @@ -56,8 +59,8 @@ protected String executor() { } @Override - protected CreateSnapshotResponse newResponse() { - return new CreateSnapshotResponse(); + protected CreateSnapshotResponse read(StreamInput in) throws IOException { + return new CreateSnapshotResponse(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java index 97a3405e9bdf6..ee4264b461c96 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java @@ -21,7 +21,6 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.common.io.stream.Writeable; /** * Delete snapshot action @@ -32,12 +31,8 @@ public class DeleteSnapshotAction extends ActionType { public static final String NAME = "cluster:admin/snapshot/delete"; private DeleteSnapshotAction() { - super(NAME); + super(NAME, AcknowledgedResponse::new); } - @Override - public Writeable.Reader getResponseReader() { - return AcknowledgedResponse::new; - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java index 09a93f62c5291..93581c937c50e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java @@ -125,9 +125,4 @@ public DeleteSnapshotRequest snapshot(String snapshot) { this.snapshot = snapshot; return this; } - - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java index 3f980225f434c..5c3395012d30d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java @@ -62,11 +62,6 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { return new AcknowledgedResponse(in); } - @Override - protected AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected ClusterBlockException checkBlock(DeleteSnapshotRequest request, ClusterState state) { // Cluster is not affected but we look up repositories in metadata diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsAction.java index ab0823b98f71c..6c34acf3814f9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.cluster.snapshots.get; import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable; /** * Get snapshots action @@ -31,12 +30,8 @@ public class GetSnapshotsAction extends ActionType { public static final String NAME = "cluster:admin/snapshot/get"; private GetSnapshotsAction() { - super(NAME); + super(NAME, GetSnapshotsResponse::new); } - @Override - public Writeable.Reader getResponseReader() { - return GetSnapshotsResponse::new; - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java index 4bfd656700684..c81281669db02 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java @@ -185,9 +185,4 @@ public GetSnapshotsRequest verbose(boolean verbose) { public boolean verbose() { return verbose; } - - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } } \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java index 0fdd2c6a5d5a7..595ed753c8359 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java @@ -257,11 +257,6 @@ public void writeTo(StreamOutput out) throws IOException { } } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - public static GetSnapshotsResponse fromXContent(XContentParser parser) throws IOException { return PARSER.parse(parser, null); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java index e1b50e469d26b..9dcad68ba24ed 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java @@ -80,11 +80,6 @@ protected String executor() { return ThreadPool.Names.GENERIC; } - @Override - protected GetSnapshotsResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected GetSnapshotsResponse read(StreamInput in) throws IOException { return new GetSnapshotsResponse(in); @@ -154,7 +149,7 @@ private List 
getSingleRepoSnapshotInfo(String repo, String[] snaps final RepositoryData repositoryData; if (isCurrentSnapshotsOnly(snapshots) == false) { repositoryData = snapshotsService.getRepositoryData(repo); - for (SnapshotId snapshotId : repositoryData.getAllSnapshotIds()) { + for (SnapshotId snapshotId : repositoryData.getSnapshotIds()) { allSnapshotIds.put(snapshotId.getName(), snapshotId); } } else { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreClusterStateListener.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreClusterStateListener.java index a74aad3ddb586..3439bbc47ec91 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreClusterStateListener.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreClusterStateListener.java @@ -58,7 +58,7 @@ public void clusterChanged(ClusterChangedEvent changedEvent) { // on the current master and as such it might miss some intermediary cluster states due to batching. // Clean up listener in that case and acknowledge completion of restore operation to client. clusterService.removeListener(this); - listener.onResponse(new RestoreSnapshotResponse(null)); + listener.onResponse(new RestoreSnapshotResponse((RestoreInfo) null)); } else if (newEntry == null) { clusterService.removeListener(this); ImmutableOpenMap shards = prevEntry.shards(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotAction.java index 7f473d10a28ab..7073a5a9638a1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotAction.java @@ -19,23 +19,18 @@ package org.elasticsearch.action.admin.cluster.snapshots.restore; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * Restore snapshot action */ -public class RestoreSnapshotAction extends StreamableResponseActionType { +public class RestoreSnapshotAction extends ActionType { public static final RestoreSnapshotAction INSTANCE = new RestoreSnapshotAction(); public static final String NAME = "cluster:admin/snapshot/restore"; private RestoreSnapshotAction() { - super(NAME); - } - - @Override - public RestoreSnapshotResponse newResponse() { - return new RestoreSnapshotResponse(); + super(NAME, RestoreSnapshotResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index 53aa522772aac..0d2cac9b401dd 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -609,11 +609,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public String getDescription() { return "snapshot [" + repository + ":" + snapshot + 
"]"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java index 5b837991ff578..942cf1c3eb475 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java @@ -49,7 +49,9 @@ public class RestoreSnapshotResponse extends ActionResponse implements ToXConten this.restoreInfo = restoreInfo; } - RestoreSnapshotResponse() { + public RestoreSnapshotResponse(StreamInput in) throws IOException { + super(in); + restoreInfo = RestoreInfo.readOptionalRestoreInfo(in); } /** @@ -61,15 +63,9 @@ public RestoreInfo getRestoreInfo() { return restoreInfo; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - restoreInfo = RestoreInfo.readOptionalRestoreInfo(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { - out.writeOptionalStreamable(restoreInfo); + out.writeOptionalWriteable(restoreInfo); } public RestStatus status() { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java index 5178a5224a058..71437718af7bb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java @@ -28,11 +28,14 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.snapshots.RestoreService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.IOException; + /** * Transport action for restore snapshot operation */ @@ -56,8 +59,8 @@ protected String executor() { } @Override - protected RestoreSnapshotResponse newResponse() { - return new RestoreSnapshotResponse(); + protected RestoreSnapshotResponse read(StreamInput in) throws IOException { + return new RestoreSnapshotResponse(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java index 5159f334250a6..e9eb63ad5dbdb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java @@ -50,7 +50,12 @@ public class SnapshotIndexShardStatus extends BroadcastShardResponse implements private String failure; - private SnapshotIndexShardStatus() { + public SnapshotIndexShardStatus(StreamInput in) throws IOException { + super(in); + stage = SnapshotIndexShardStage.fromValue(in.readByte()); + stats = new SnapshotStats(in); + nodeId = in.readOptionalString(); + failure = in.readOptionalString(); } SnapshotIndexShardStatus(ShardId shardId, SnapshotIndexShardStage stage) 
{ @@ -127,13 +132,6 @@ public String getFailure() { return failure; } - - public static SnapshotIndexShardStatus readShardSnapshotStatus(StreamInput in) throws IOException { - SnapshotIndexShardStatus shardStatus = new SnapshotIndexShardStatus(); - shardStatus.readFrom(in); - return shardStatus; - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -143,15 +141,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(failure); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - stage = SnapshotIndexShardStage.fromValue(in.readByte()); - stats = SnapshotStats.readSnapshotStats(in); - nodeId = in.readOptionalString(); - failure = in.readOptionalString(); - } - static final class Fields { static final String STAGE = "stage"; static final String REASON = "reason"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStats.java index 512df16068c24..27e9a2a33e594 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStats.java @@ -21,7 +21,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContent; @@ -32,7 +32,7 @@ import java.io.IOException; -public class SnapshotStats implements Streamable, ToXContentObject { +public class SnapshotStats implements Writeable, ToXContentObject { private long startTime; private long time; @@ -43,7 +43,20 @@ public class SnapshotStats implements Streamable, ToXContentObject { private long totalSize; private long processedSize; - SnapshotStats() { + SnapshotStats() {} + + SnapshotStats(StreamInput in) throws IOException { + startTime = in.readVLong(); + time = in.readVLong(); + + incrementalFileCount = in.readVInt(); + processedFileCount = in.readVInt(); + + incrementalSize = in.readVLong(); + processedSize = in.readVLong(); + + totalFileCount = in.readVInt(); + totalSize = in.readVLong(); } SnapshotStats(long startTime, long time, @@ -115,13 +128,6 @@ public long getProcessedSize() { return processedSize; } - - public static SnapshotStats readSnapshotStats(StreamInput in) throws IOException { - SnapshotStats stats = new SnapshotStats(); - stats.readFrom(in); - return stats; - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVLong(startTime); @@ -137,21 +143,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(totalSize); } - @Override - public void readFrom(StreamInput in) throws IOException { - startTime = in.readVLong(); - time = in.readVLong(); - - incrementalFileCount = in.readVInt(); - processedFileCount = in.readVInt(); - - incrementalSize = in.readVLong(); - processedSize = in.readVLong(); - - totalFileCount = in.readVInt(); - totalSize = in.readVLong(); - } - static final class Fields { static final String STATS = "stats"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java index 613887274b3ca..b224b0913060f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java @@ -27,7 +27,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; @@ -55,7 +55,7 @@ /** * Status of a snapshot */ -public class SnapshotStatus implements ToXContentObject, Streamable { +public class SnapshotStatus implements ToXContentObject, Writeable { private Snapshot snapshot; @@ -72,6 +72,28 @@ public class SnapshotStatus implements ToXContentObject, Streamable { @Nullable private Boolean includeGlobalState; + SnapshotStatus(StreamInput in) throws IOException { + snapshot = new Snapshot(in); + state = State.fromValue(in.readByte()); + int size = in.readVInt(); + List builder = new ArrayList<>(); + for (int i = 0; i < size; i++) { + builder.add(new SnapshotIndexShardStatus(in)); + } + shards = Collections.unmodifiableList(builder); + includeGlobalState = in.readOptionalBoolean(); + final long startTime; + final long time; + if (in.getVersion().onOrAfter(Version.V_7_4_0)) { + startTime = in.readLong(); + time = in.readLong(); + } else { + startTime = 0L; + time = 0L; + } + updateShardStats(startTime, time); + } + SnapshotStatus(Snapshot snapshot, State state, List shards, Boolean includeGlobalState, long startTime, long time) { this.snapshot = Objects.requireNonNull(snapshot); @@ -94,9 +116,6 @@ private SnapshotStatus(Snapshot snapshot, State state, List getIndices() { } - @Override - public void readFrom(StreamInput in) throws IOException { - snapshot = new Snapshot(in); - state = State.fromValue(in.readByte()); - int size = in.readVInt(); - List builder = new ArrayList<>(); - for (int i = 0; i < size; i++) { - builder.add(SnapshotIndexShardStatus.readShardSnapshotStatus(in)); - } - shards = Collections.unmodifiableList(builder); - includeGlobalState = in.readOptionalBoolean(); - final long startTime; - final long time; - if (in.getVersion().onOrAfter(Version.V_8_0_0)) { - startTime = in.readLong(); - time = in.readLong(); - } else { - startTime = 0L; - time = 0L; - } - updateShardStats(startTime, time); - } - @Override public void writeTo(StreamOutput out) throws IOException { snapshot.writeTo(out); @@ -191,24 +187,12 @@ public void writeTo(StreamOutput out) throws IOException { shard.writeTo(out); } out.writeOptionalBoolean(includeGlobalState); - if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + if (out.getVersion().onOrAfter(Version.V_7_4_0)) { out.writeLong(stats.getStartTime()); out.writeLong(stats.getTime()); } } - /** - * Reads snapshot status from stream input - * - * @param in stream input - * @return deserialized snapshot status - */ - public static SnapshotStatus readSnapshotStatus(StreamInput in) throws IOException { - SnapshotStatus snapshotInfo = new SnapshotStatus(); - snapshotInfo.readFrom(in); - return snapshotInfo; - } - @Override public String toString() { return Strings.toString(this, true, false); diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusAction.java index ec95e0e14f13f..b0833c777b7ad 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusAction.java @@ -19,23 +19,18 @@ package org.elasticsearch.action.admin.cluster.snapshots.status; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * Snapshots status action */ -public class SnapshotsStatusAction extends StreamableResponseActionType { +public class SnapshotsStatusAction extends ActionType { public static final SnapshotsStatusAction INSTANCE = new SnapshotsStatusAction(); public static final String NAME = "cluster:admin/snapshot/status"; private SnapshotsStatusAction() { - super(NAME); - } - - @Override - public SnapshotsStatusResponse newResponse() { - return new SnapshotsStatusResponse(); + super(NAME, SnapshotsStatusResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java index 89a96648871a3..ae750b93b5d9c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java @@ -149,9 +149,4 @@ public SnapshotsStatusRequest ignoreUnavailable(boolean ignoreUnavailable) { public boolean ignoreUnavailable() { return ignoreUnavailable; } - - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java index 20a37823f2b9f..acf609b9bd1f3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java @@ -42,7 +42,14 @@ public class SnapshotsStatusResponse extends ActionResponse implements ToXConten private List snapshots = Collections.emptyList(); - SnapshotsStatusResponse() { + public SnapshotsStatusResponse(StreamInput in) throws IOException { + super(in); + int size = in.readVInt(); + List builder = new ArrayList<>(); + for (int i = 0; i < size; i++) { + builder.add(new SnapshotStatus(in)); + } + snapshots = Collections.unmodifiableList(builder); } SnapshotsStatusResponse(List snapshots) { @@ -58,17 +65,6 @@ public List getSnapshots() { return snapshots; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - int size = in.readVInt(); - List builder = new ArrayList<>(); - for (int i = 0; i < size; i++) { - builder.add(SnapshotStatus.readSnapshotStatus(in)); - } - snapshots = Collections.unmodifiableList(builder); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(snapshots.size()); diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java index 74b9e8cb3c592..6a83bc71c5ccc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.admin.cluster.snapshots.status; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.nodes.BaseNodeRequest; @@ -30,6 +31,7 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.shard.ShardId; @@ -57,9 +59,11 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction { public static final String ACTION_NAME = SnapshotsStatusAction.NAME + "[nodes]"; + public static final ActionType TYPE = new ActionType<>(ACTION_NAME, NodesSnapshotStatus::new); private final SnapshotShardsService snapshotShardsService; + @Inject public TransportNodesSnapshotsStatus(ThreadPool threadPool, ClusterService clusterService, TransportService transportService, SnapshotShardsService snapshotShardsService, ActionFilters actionFilters) { @@ -74,8 +78,8 @@ protected NodeRequest newNodeRequest(Request request) { } @Override - protected NodeSnapshotStatus newNodeResponse() { - return new NodeSnapshotStatus(); + protected NodeSnapshotStatus newNodeResponse(StreamInput in) throws IOException { + return new NodeSnapshotStatus(in); } @Override @@ -119,7 +123,10 @@ public static class Request extends BaseNodesRequest { private Snapshot[] snapshots; - public Request() { + public Request(StreamInput in) throws IOException { + super(in); + // This operation is never executed remotely + throw new UnsupportedOperationException("shouldn't be here"); } public Request(String[] nodesIds) { @@ -131,12 +138,6 @@ public Request snapshots(Snapshot[] snapshots) { return this; } - @Override - public void readFrom(StreamInput in) throws IOException { - // This operation is never executed remotely - throw new UnsupportedOperationException("shouldn't be here"); - } - @Override public void writeTo(StreamOutput out) throws IOException { // This operation is never executed remotely @@ -146,18 +147,22 @@ public void writeTo(StreamOutput out) throws IOException { public static class NodesSnapshotStatus extends BaseNodesResponse { + public NodesSnapshotStatus(StreamInput in) throws IOException { + super(in); + } + public NodesSnapshotStatus(ClusterName clusterName, List nodes, List failures) { super(clusterName, nodes, failures); } @Override protected List readNodesFrom(StreamInput in) throws IOException { - return in.readStreamableList(NodeSnapshotStatus::new); + return in.readList(NodeSnapshotStatus::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeStreamableList(nodes); + out.writeList(nodes); } } @@ -166,19 +171,15 @@ public static class NodeRequest 
extends BaseNodeRequest { private List snapshots; - public NodeRequest() { + public NodeRequest(StreamInput in) throws IOException { + super(in); + snapshots = in.readList(Snapshot::new); } NodeRequest(TransportNodesSnapshotsStatus.Request request) { snapshots = Arrays.asList(request.snapshots); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - snapshots = in.readList(Snapshot::new); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -190,21 +191,8 @@ public static class NodeSnapshotStatus extends BaseNodeResponse { private Map> status; - NodeSnapshotStatus() { - } - - public NodeSnapshotStatus(DiscoveryNode node, Map> status) { - super(node); - this.status = status; - } - - public Map> status() { - return status; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); + public NodeSnapshotStatus(StreamInput in) throws IOException { + super(in); int numberOfSnapshots = in.readVInt(); Map> snapshotMapBuilder = new HashMap<>(numberOfSnapshots); for (int i = 0; i < numberOfSnapshots; i++) { @@ -213,7 +201,7 @@ public void readFrom(StreamInput in) throws IOException { Map shardMapBuilder = new HashMap<>(numberOfShards); for (int j = 0; j < numberOfShards; j++) { ShardId shardId = new ShardId(in); - SnapshotIndexShardStatus status = SnapshotIndexShardStatus.readShardSnapshotStatus(in); + SnapshotIndexShardStatus status = new SnapshotIndexShardStatus(in); shardMapBuilder.put(shardId, status); } snapshotMapBuilder.put(snapshot, unmodifiableMap(shardMapBuilder)); @@ -221,6 +209,15 @@ public void readFrom(StreamInput in) throws IOException { status = unmodifiableMap(snapshotMapBuilder); } + public NodeSnapshotStatus(DiscoveryNode node, Map> status) { + super(node); + this.status = status; + } + + public Map> status() { + return status; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index 2566626506efd..ae143add71ace 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -32,6 +33,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; @@ -61,17 +63,16 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction buildResponse(request, snapshotsService.currentSnapshots(request.repository(), Arrays.asList(request.snapshots())), @@ -195,7 +196,7 @@ private 
SnapshotsStatusResponse buildResponse(SnapshotsStatusRequest request, Li if (Strings.hasText(repositoryName) && request.snapshots() != null && request.snapshots().length > 0) { final Set requestedSnapshotNames = Sets.newHashSet(request.snapshots()); final RepositoryData repositoryData = snapshotsService.getRepositoryData(repositoryName); - final Map matchedSnapshotIds = repositoryData.getAllSnapshotIds().stream() + final Map matchedSnapshotIds = repositoryData.getSnapshotIds().stream() .filter(s -> requestedSnapshotNames.contains(s.getName())) .collect(Collectors.toMap(SnapshotId::getName, Function.identity())); for (final String snapshotName : request.snapshots()) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateAction.java index b8a08444efccd..918219f1b1bb4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateAction.java @@ -19,19 +19,14 @@ package org.elasticsearch.action.admin.cluster.state; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class ClusterStateAction extends StreamableResponseActionType { +public class ClusterStateAction extends ActionType { public static final ClusterStateAction INSTANCE = new ClusterStateAction(); public static final String NAME = "cluster:monitor/state"; private ClusterStateAction() { - super(NAME); - } - - @Override - public ClusterStateResponse newResponse() { - return new ClusterStateResponse(); + super(NAME, ClusterStateResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java index 7f07aeb364471..dbef3a4458945 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java @@ -186,9 +186,4 @@ public ClusterStateRequest waitForMetaDataVersion(long waitForMetaDataVersion) { this.waitForMetaDataVersion = waitForMetaDataVersion; return this; } - - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java index 06dd28a544d80..5680716aba07d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java @@ -40,7 +40,14 @@ public class ClusterStateResponse extends ActionResponse { private ClusterState clusterState; private boolean waitForTimedOut = false; - public ClusterStateResponse() { + public ClusterStateResponse(StreamInput in) throws IOException { + super(in); + clusterName = new ClusterName(in); + clusterState = in.readOptionalWriteable(innerIn -> ClusterState.readFrom(innerIn, null)); + if (in.getVersion().before(Version.V_7_0_0)) { + new ByteSizeValue(in); + } + waitForTimedOut = in.readBoolean(); } public ClusterStateResponse(ClusterName clusterName, ClusterState clusterState, 
boolean waitForTimedOut) { @@ -72,17 +79,6 @@ public boolean isWaitForTimedOut() { return waitForTimedOut; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - clusterName = new ClusterName(in); - clusterState = in.readOptionalWriteable(innerIn -> ClusterState.readFrom(innerIn, null)); - if (in.getVersion().before(Version.V_7_0_0)) { - new ByteSizeValue(in); - } - waitForTimedOut = in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { clusterName.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java index 9669abf8e931a..72944145f080b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -36,6 +36,7 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.tasks.Task; @@ -63,6 +64,11 @@ protected String executor() { return ThreadPool.Names.SAME; } + @Override + protected ClusterStateResponse read(StreamInput in) throws IOException { + return new ClusterStateResponse(in); + } + @Override protected ClusterBlockException checkBlock(ClusterStateRequest request, ClusterState state) { // cluster state calls are done also on a fully blocked cluster to figure out what is going @@ -72,11 +78,6 @@ protected ClusterBlockException checkBlock(ClusterStateRequest request, ClusterS return null; } - @Override - protected ClusterStateResponse newResponse() { - return new ClusterStateResponse(); - } - @Override protected void masterOperation(Task task, final ClusterStateRequest request, final ClusterState state, final ActionListener listener) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsAction.java index 0652ab35e266a..b506a29784051 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsAction.java @@ -19,19 +19,14 @@ package org.elasticsearch.action.admin.cluster.stats; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class ClusterStatsAction extends StreamableResponseActionType { +public class ClusterStatsAction extends ActionType { public static final ClusterStatsAction INSTANCE = new ClusterStatsAction(); public static final String NAME = "cluster:monitor/stats"; private ClusterStatsAction() { - super(NAME); - } - - @Override - public ClusterStatsResponse newResponse() { - return new ClusterStatsResponse(); + super(NAME, ClusterStatsResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java index cdef2a03b534c..b32ed7e39d239 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java @@ -38,7 +38,19 @@ public class ClusterStatsNodeResponse extends BaseNodeResponse { private ShardStats[] shardsStats; private ClusterHealthStatus clusterStatus; - ClusterStatsNodeResponse() { + public ClusterStatsNodeResponse(StreamInput in) throws IOException { + super(in); + clusterStatus = null; + if (in.readBoolean()) { + clusterStatus = ClusterHealthStatus.fromValue(in.readByte()); + } + this.nodeInfo = new NodeInfo(in); + this.nodeStats = new NodeStats(in); + int size = in.readVInt(); + shardsStats = new ShardStats[size]; + for (int i = 0; i < size; i++) { + shardsStats[i] = new ShardStats(in); + } } public ClusterStatsNodeResponse(DiscoveryNode node, @Nullable ClusterHealthStatus clusterStatus, @@ -71,25 +83,7 @@ public ShardStats[] shardsStats() { } public static ClusterStatsNodeResponse readNodeResponse(StreamInput in) throws IOException { - ClusterStatsNodeResponse nodeResponse = new ClusterStatsNodeResponse(); - nodeResponse.readFrom(in); - return nodeResponse; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - clusterStatus = null; - if (in.readBoolean()) { - clusterStatus = ClusterHealthStatus.fromValue(in.readByte()); - } - this.nodeInfo = NodeInfo.readNodeInfo(in); - this.nodeStats = NodeStats.readNodeStats(in); - int size = in.readVInt(); - shardsStats = new ShardStats[size]; - for (int i = 0; i < size; i++) { - shardsStats[i] = ShardStats.readShardStats(in); - } + return new ClusterStatsNodeResponse(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsRequest.java index 845d305b8abe5..c79b2b73a8298 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsRequest.java @@ -30,7 +30,8 @@ */ public class ClusterStatsRequest extends BaseNodesRequest { - public ClusterStatsRequest() { + public ClusterStatsRequest(StreamInput in) throws IOException { + super(in); } /** @@ -41,11 +42,6 @@ public ClusterStatsRequest(String... 
nodesIds) { super(nodesIds); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java index 372704afaf666..c13da033aec7b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java @@ -42,7 +42,11 @@ public class ClusterStatsResponse extends BaseNodesResponse readNodesFrom(StreamInput in) throws IO @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { // nodeStats and indicesStats are rebuilt from nodes - out.writeStreamableList(nodes); + out.writeList(nodes); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java index ef3bb6dfab24e..5ecf569397d4f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -87,8 +87,8 @@ protected ClusterStatsNodeRequest newNodeRequest(ClusterStatsRequest request) { } @Override - protected ClusterStatsNodeResponse newNodeResponse() { - return new ClusterStatsNodeResponse(); + protected ClusterStatsNodeResponse newNodeResponse(StreamInput in) throws IOException { + return new ClusterStatsNodeResponse(in); } @Override @@ -140,20 +140,15 @@ public static class ClusterStatsNodeRequest extends BaseNodeRequest { ClusterStatsRequest request; - public ClusterStatsNodeRequest() { + public ClusterStatsNodeRequest(StreamInput in) throws IOException { + super(in); + request = new ClusterStatsRequest(in); } ClusterStatsNodeRequest(ClusterStatsRequest request) { this.request = request; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - request = new ClusterStatsRequest(); - request.readFrom(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptAction.java index 35814f2221f76..a52f47e27fef1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptAction.java @@ -21,7 +21,6 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.common.io.stream.Writeable; public class DeleteStoredScriptAction extends ActionType { @@ -29,11 +28,7 @@ public class DeleteStoredScriptAction extends ActionType { public static final String NAME = "cluster:admin/script/delete"; private DeleteStoredScriptAction() { - super(NAME); + super(NAME, AcknowledgedResponse::new); } - @Override - public Writeable.Reader getResponseReader() { - return AcknowledgedResponse::new; - } } diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java index 0d595cf92642d..b362ac8f6bcc7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java @@ -32,6 +32,11 @@ public class DeleteStoredScriptRequest extends AcknowledgedRequest { +public class GetStoredScriptAction extends ActionType { public static final GetStoredScriptAction INSTANCE = new GetStoredScriptAction(); public static final String NAME = "cluster:admin/script/get"; private GetStoredScriptAction() { - super(NAME); - } - - @Override - public GetStoredScriptResponse newResponse() { - return new GetStoredScriptResponse(); + super(NAME, GetStoredScriptResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptRequest.java index 04991a8917526..f27c14cc8fb0a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptRequest.java @@ -76,11 +76,6 @@ public GetStoredScriptRequest id(String id) { return this; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public String toString() { return "get script [" + id + "]"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java index 082e96c6272c5..e0058bba953f0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java @@ -65,7 +65,15 @@ public class GetStoredScriptResponse extends ActionResponse implements StatusToX private String id; private StoredScriptSource source; - GetStoredScriptResponse() { + public GetStoredScriptResponse(StreamInput in) throws IOException { + super(in); + + if (in.readBoolean()) { + source = new StoredScriptSource(in); + } else { + source = null; + } + id = in.readString(); } GetStoredScriptResponse(String id, StoredScriptSource source) { @@ -108,18 +116,6 @@ public static GetStoredScriptResponse fromXContent(XContentParser parser) throws return PARSER.parse(parser, null); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - - if (in.readBoolean()) { - source = new StoredScriptSource(in); - } else { - source = null; - } - id = in.readString(); - } - @Override public void writeTo(StreamOutput out) throws IOException { if (source == null) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptAction.java index 6cbf57f3bd891..f261bb6d2f2c2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptAction.java @@ -21,7 +21,6 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.common.io.stream.Writeable; public class PutStoredScriptAction extends ActionType { @@ -30,11 +29,7 @@ public class PutStoredScriptAction extends ActionType { public static final String NAME = "cluster:admin/script/put"; private PutStoredScriptAction() { - super(NAME); + super(NAME, AcknowledgedResponse::new); } - @Override - public Writeable.Reader getResponseReader() { - return AcknowledgedResponse::new; - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java index 9904fc5dcde2b..85ec9b14f097e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java @@ -43,6 +43,15 @@ public class PutStoredScriptRequest extends AcknowledgedRequest listener) throws Exception { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java index 1e67e025b61b9..4de6dfef713dc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java @@ -28,11 +28,14 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.script.ScriptService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.IOException; + public class TransportGetStoredScriptAction extends TransportMasterNodeReadAction { @@ -53,8 +56,8 @@ protected String executor() { } @Override - protected GetStoredScriptResponse newResponse() { - return new GetStoredScriptResponse(); + protected GetStoredScriptResponse read(StreamInput in) throws IOException { + return new GetStoredScriptResponse(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java index ceabd31c45604..fbde84785ce40 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java @@ -46,7 +46,7 @@ public TransportPutStoredScriptAction(TransportService transportService, Cluster ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, ScriptService scriptService) { super(PutStoredScriptAction.NAME, transportService, clusterService, threadPool, actionFilters, - indexNameExpressionResolver, PutStoredScriptRequest::new); + PutStoredScriptRequest::new, 
indexNameExpressionResolver); this.scriptService = scriptService; } @@ -60,11 +60,6 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { return new AcknowledgedResponse(in); } - @Override - protected AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected void masterOperation(Task task, PutStoredScriptRequest request, ClusterState state, ActionListener listener) throws Exception { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksAction.java index 240575bb1da45..3414002ea2b76 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksAction.java @@ -19,19 +19,14 @@ package org.elasticsearch.action.admin.cluster.tasks; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class PendingClusterTasksAction extends StreamableResponseActionType { +public class PendingClusterTasksAction extends ActionType { public static final PendingClusterTasksAction INSTANCE = new PendingClusterTasksAction(); public static final String NAME = "cluster:monitor/task"; private PendingClusterTasksAction() { - super(NAME); - } - - @Override - public PendingClusterTasksResponse newResponse() { - return new PendingClusterTasksResponse(); + super(NAME, PendingClusterTasksResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java index 417dbca60eefd..fa323c152d6b3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java @@ -35,7 +35,13 @@ public class PendingClusterTasksResponse extends ActionResponse implements Itera private List pendingTasks; - PendingClusterTasksResponse() { + public PendingClusterTasksResponse(StreamInput in) throws IOException { + super(in); + int size = in.readVInt(); + pendingTasks = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + pendingTasks.add(new PendingClusterTask(in)); + } } PendingClusterTasksResponse(List pendingTasks) { @@ -100,18 +106,6 @@ static final class Fields { } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - int size = in.readVInt(); - pendingTasks = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - PendingClusterTask task = new PendingClusterTask(); - task.readFrom(in); - pendingTasks.add(task); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(pendingTasks.size()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java index d62e7c13247bc..4eada5d0bc6ef 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java @@ -24,15 +24,16 @@ 
import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.PendingClusterTask; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.IOException; import java.util.List; public class TransportPendingClusterTasksAction @@ -45,7 +46,7 @@ public TransportPendingClusterTasksAction(TransportService transportService, Clu ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { super(PendingClusterTasksAction.NAME, transportService, clusterService, threadPool, actionFilters, - PendingClusterTasksRequest::new, indexNameExpressionResolver); + PendingClusterTasksRequest::new, indexNameExpressionResolver); this.clusterService = clusterService; } @@ -56,13 +57,13 @@ protected String executor() { } @Override - protected ClusterBlockException checkBlock(PendingClusterTasksRequest request, ClusterState state) { - return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + protected PendingClusterTasksResponse read(StreamInput in) throws IOException { + return new PendingClusterTasksResponse(in); } @Override - protected PendingClusterTasksResponse newResponse() { - return new PendingClusterTasksResponse(); + protected ClusterBlockException checkBlock(PendingClusterTasksRequest request, ClusterState state) { + return null; } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/Alias.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/Alias.java index 9d9f4c617265c..f118441771839 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/Alias.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/Alias.java @@ -26,7 +26,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -42,7 +42,7 @@ /** * Represents an alias, to be associated with an index */ -public class Alias implements Streamable, ToXContentFragment { +public class Alias implements Writeable, ToXContentFragment { private static final ParseField FILTER = new ParseField("filter"); private static final ParseField ROUTING = new ParseField("routing"); @@ -64,8 +64,12 @@ public class Alias implements Streamable, ToXContentFragment { @Nullable private Boolean writeIndex; - private Alias() { - + public Alias(StreamInput in) throws IOException { + name = in.readString(); + filter = in.readOptionalString(); + indexRouting = in.readOptionalString(); + searchRouting = in.readOptionalString(); + writeIndex = in.readOptionalBoolean(); } public Alias(String name) { @@ -185,24 +189,6 @@ public Alias writeIndex(@Nullable Boolean writeIndex) { return this; } 
- /** - * Allows to read an alias from the provided input stream - */ - public static Alias read(StreamInput in) throws IOException { - Alias alias = new Alias(); - alias.readFrom(in); - return alias; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - name = in.readString(); - filter = in.readOptionalString(); - indexRouting = in.readOptionalString(); - searchRouting = in.readOptionalString(); - writeIndex = in.readOptionalBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesAction.java index fe18bd9315459..b6acb50421198 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesAction.java @@ -21,7 +21,6 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.common.io.stream.Writeable; public class IndicesAliasesAction extends ActionType { @@ -29,11 +28,7 @@ public class IndicesAliasesAction extends ActionType { public static final String NAME = "indices:admin/aliases"; private IndicesAliasesAction() { - super(NAME); + super(NAME, AcknowledgedResponse::new); } - @Override - public Writeable.Reader getResponseReader() { - return AcknowledgedResponse::new; - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java index 4fe3094f33cef..eede7ecf4a408 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java @@ -70,6 +70,16 @@ public class IndicesAliasesRequest extends AcknowledgedRequest requestValidators) { - super(IndicesAliasesAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, - IndicesAliasesRequest::new); + super(IndicesAliasesAction.NAME, transportService, clusterService, threadPool, actionFilters, IndicesAliasesRequest::new, + indexNameExpressionResolver); this.indexAliasesService = indexAliasesService; this.requestValidators = Objects.requireNonNull(requestValidators); } @@ -90,11 +90,6 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { return new AcknowledgedResponse(in); } - @Override - protected AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected ClusterBlockException checkBlock(IndicesAliasesRequest request, ClusterState state) { Set indices = new HashSet<>(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesAction.java index 2798f21e820a4..cd0b1352fc3c6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesAction.java @@ -19,19 +19,14 @@ package org.elasticsearch.action.admin.indices.alias.get; -import org.elasticsearch.action.StreamableResponseActionType; 
+import org.elasticsearch.action.ActionType; -public class GetAliasesAction extends StreamableResponseActionType { +public class GetAliasesAction extends ActionType { public static final GetAliasesAction INSTANCE = new GetAliasesAction(); public static final String NAME = "indices:admin/aliases/get"; private GetAliasesAction() { - super(NAME); - } - - @Override - public GetAliasesResponse newResponse() { - return new GetAliasesResponse(); + super(NAME, GetAliasesResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java index 40fae0d9543c6..d087711b9873a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java @@ -113,9 +113,4 @@ public IndicesOptions indicesOptions() { public ActionRequestValidationException validate() { return null; } - - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java index c84eb7f0b99f1..3376d9c810043 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java @@ -40,16 +40,8 @@ public GetAliasesResponse(ImmutableOpenMap> aliases) this.aliases = aliases; } - GetAliasesResponse() { - } - - public ImmutableOpenMap> getAliases() { - return aliases; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); + public GetAliasesResponse(StreamInput in) throws IOException { + super(in); int size = in.readVInt(); ImmutableOpenMap.Builder> aliasesBuilder = ImmutableOpenMap.builder(); for (int i = 0; i < size; i++) { @@ -64,6 +56,10 @@ public void readFrom(StreamInput in) throws IOException { aliases = aliasesBuilder.build(); } + public ImmutableOpenMap> getAliases() { + return aliases; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(aliases.size()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java index 30de49031e101..0e53edeb46705 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java @@ -29,10 +29,12 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.IOException; import java.util.Collections; import java.util.List; @@ -59,8 +61,8 @@ protected ClusterBlockException checkBlock(GetAliasesRequest request, ClusterSta } @Override - protected GetAliasesResponse newResponse() { - return 
new GetAliasesResponse(); + protected GetAliasesResponse read(StreamInput in) throws IOException { + return new GetAliasesResponse(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java index 366a5dafa2aa4..51ac4bee89266 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java @@ -19,9 +19,10 @@ package org.elasticsearch.action.admin.indices.analyze; -import org.elasticsearch.action.ActionType; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.single.shard.SingleShardRequest; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; @@ -51,12 +52,7 @@ public class AnalyzeAction extends ActionType { public static final String NAME = "indices:admin/analyze"; private AnalyzeAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return Response::new; + super(NAME, AnalyzeAction.Response::new); } /** @@ -292,30 +288,31 @@ public static class Response extends ActionResponse implements ToXContentObject private final List tokens; public Response(List tokens, DetailAnalyzeResponse detail) { + if (tokens == null && detail == null) { + throw new IllegalArgumentException("Neither token nor detail set on AnalysisAction.Response"); + } this.tokens = tokens; this.detail = detail; } public Response(StreamInput in) throws IOException { - super.readFrom(in); - int size = in.readVInt(); - if (size > 0) { - tokens = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - tokens.add(new AnalyzeToken(in)); + if (in.getVersion().onOrAfter(Version.V_7_3_0)) { + AnalyzeToken[] tokenArray = in.readOptionalArray(AnalyzeToken::new, AnalyzeToken[]::new); + tokens = tokenArray != null ? 
Arrays.asList(tokenArray) : null; + } else { + int size = in.readVInt(); + if (size > 0) { + tokens = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + tokens.add(new AnalyzeToken(in)); + } + } else { + tokens = null; } } - else { - tokens = null; - } detail = in.readOptionalWriteable(DetailAnalyzeResponse::new); } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - public List getTokens() { return this.tokens; } @@ -346,21 +343,33 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { - if (tokens != null) { - out.writeVInt(tokens.size()); - for (AnalyzeToken token : tokens) { - token.writeTo(out); + if (out.getVersion().onOrAfter(Version.V_7_3_0)) { + AnalyzeToken[] tokenArray = null; + if (tokens != null) { + tokenArray = tokens.toArray(new AnalyzeToken[0]); } + out.writeOptionalArray(tokenArray); } else { - out.writeVInt(0); + if (tokens != null) { + out.writeVInt(tokens.size()); + for (AnalyzeToken token : tokens) { + token.writeTo(out); + } + } else { + out.writeVInt(0); + } } out.writeOptionalWriteable(detail); } @Override public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } Response that = (Response) o; return Objects.equals(detail, that.detail) && Objects.equals(tokens, that.tokens); @@ -401,8 +410,12 @@ public static class AnalyzeToken implements Writeable, ToXContentObject { @Override public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } AnalyzeToken that = (AnalyzeToken) o; return startOffset == that.startOffset && endOffset == that.endOffset && @@ -582,8 +595,12 @@ public AnalyzeTokenList[] tokenfilters() { @Override public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } DetailAnalyzeResponse that = (DetailAnalyzeResponse) o; return customAnalyzer == that.customAnalyzer && Objects.equals(analyzer, that.analyzer) && @@ -669,8 +686,12 @@ public static class AnalyzeTokenList implements Writeable, ToXContentObject { @Override public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } AnalyzeTokenList that = (AnalyzeTokenList) o; return Objects.equals(name, that.name) && Arrays.equals(tokens, that.tokens); @@ -690,16 +711,19 @@ public AnalyzeTokenList(String name, AnalyzeToken[] tokens) { AnalyzeTokenList(StreamInput in) throws IOException { name = in.readString(); - int size = in.readVInt(); - if (size > 0) { - tokens = new AnalyzeToken[size]; - for (int i = 0; i < size; i++) { - tokens[i] = new AnalyzeToken(in); + if (in.getVersion().onOrAfter(Version.V_7_3_0)) { + tokens = in.readOptionalArray(AnalyzeToken::new, AnalyzeToken[]::new); + } else { + int size = in.readVInt(); + if (size > 0) { + tokens = new AnalyzeToken[size]; + for (int i = 0; i < size; i++) { 
+ tokens[i] = new AnalyzeToken(in); + } + } else { + tokens = null; } } - else { - tokens = null; - } } public String getName() { @@ -732,13 +756,17 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); - if (tokens != null) { - out.writeVInt(tokens.length); - for (AnalyzeToken token : tokens) { - token.writeTo(out); - } + if (out.getVersion().onOrAfter(Version.V_7_3_0)) { + out.writeOptionalArray(tokens); } else { - out.writeVInt(0); + if (tokens != null) { + out.writeVInt(tokens.length); + for (AnalyzeToken token : tokens) { + token.writeTo(out); + } + } else { + out.writeVInt(0); + } } } } @@ -789,8 +817,12 @@ public void writeTo(StreamOutput out) throws IOException { @Override public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } CharFilteredText that = (CharFilteredText) o; return Objects.equals(name, that.name) && Arrays.equals(texts, that.texts); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheAction.java index 8f020df00edda..6c098353ec11b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheAction.java @@ -19,19 +19,14 @@ package org.elasticsearch.action.admin.indices.cache.clear; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class ClearIndicesCacheAction extends StreamableResponseActionType { +public class ClearIndicesCacheAction extends ActionType { public static final ClearIndicesCacheAction INSTANCE = new ClearIndicesCacheAction(); public static final String NAME = "indices:admin/cache/clear"; private ClearIndicesCacheAction() { - super(NAME); - } - - @Override - public ClearIndicesCacheResponse newResponse() { - return new ClearIndicesCacheResponse(); + super(NAME, ClearIndicesCacheResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java index 194539d78f7be..252a92d7a57bd 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java @@ -33,8 +33,12 @@ public class ClearIndicesCacheRequest extends BroadcastRequest { @@ -28,11 +27,6 @@ public class CloseIndexAction extends ActionType { public static final String NAME = "indices:admin/close"; private CloseIndexAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return CloseIndexResponse::new; + super(NAME, CloseIndexResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequest.java index 351194f9a789c..830f5f18e00fb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequest.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequest.java @@ -42,6 +42,17 @@ public class CloseIndexRequest extends AcknowledgedRequest<CloseIndexRequest> im private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen(); private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT; + public CloseIndexRequest(StreamInput in) throws IOException { + super(in); + indices = in.readStringArray(); + indicesOptions = IndicesOptions.readIndicesOptions(in); + if (in.getVersion().onOrAfter(Version.V_7_2_0)) { + waitForActiveShards = ActiveShardCount.readFrom(in); + } else { + waitForActiveShards = ActiveShardCount.NONE; + } + } + public CloseIndexRequest() { } @@ -113,18 +124,6 @@ public CloseIndexRequest waitForActiveShards(final ActiveShardCount waitForActiv return this; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - indices = in.readStringArray(); - indicesOptions = IndicesOptions.readIndicesOptions(in); - if (in.getVersion().onOrAfter(Version.V_7_2_0)) { - waitForActiveShards = ActiveShardCount.readFrom(in); - } else { - waitForActiveShards = ActiveShardCount.NONE; - } - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java index 925b9dd4d5231..72c8f2f3efe39 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java @@ -238,7 +238,9 @@ public static class Failure extends DefaultShardOperationFailedException { private @Nullable String nodeId; - private Failure() { + private Failure(StreamInput in) throws IOException { + super(in); + nodeId = in.readOptionalString(); } public Failure(final String index, final int shardId, final Throwable reason) { @@ -254,12 +256,6 @@ public String getNodeId() { return nodeId; } - @Override - public void readFrom(final StreamInput in) throws IOException { - super.readFrom(in); - nodeId = in.readOptionalString(); - } - @Override public void writeTo(final StreamOutput out) throws IOException { super.writeTo(out); @@ -280,9 +276,7 @@ public String toString() { } static Failure readFailure(final StreamInput in) throws IOException { - final Failure failure = new Failure(); - failure.readFrom(in); - return failure; + return new Failure(in); } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java index d653f000d0bc5..e28dddbbb0176 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java @@ -60,8 +60,8 @@ public TransportCloseIndexAction(Settings settings, TransportService transportSe ThreadPool threadPool, MetaDataIndexStateService indexStateService, ClusterSettings clusterSettings, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, DestructiveOperations destructiveOperations) { - super(CloseIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, - CloseIndexRequest::new); + super(CloseIndexAction.NAME, transportService,
clusterService, threadPool, actionFilters, CloseIndexRequest::new, + indexNameExpressionResolver); this.indexStateService = indexStateService; this.destructiveOperations = destructiveOperations; this.closeIndexEnabled = CLUSTER_INDICES_CLOSE_ENABLE_SETTING.get(settings); @@ -78,11 +78,6 @@ protected String executor() { return ThreadPool.Names.SAME; } - @Override - protected CloseIndexResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected CloseIndexResponse read(StreamInput in) throws IOException { return new CloseIndexResponse(in); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java index 79cbab4781947..bbd55e27e61fd 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java @@ -22,6 +22,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.replication.ReplicationOperation; @@ -52,6 +53,7 @@ public class TransportVerifyShardBeforeCloseAction extends TransportReplicationAction<TransportVerifyShardBeforeCloseAction.ShardRequest, TransportVerifyShardBeforeCloseAction.ShardRequest, ReplicationResponse> { public static final String NAME = CloseIndexAction.NAME + "[s]"; + public static final ActionType<ReplicationResponse> TYPE = new ActionType<>(NAME, ReplicationResponse::new); protected Logger logger = LogManager.getLogger(getClass()); @Inject @@ -64,8 +66,8 @@ public TransportVerifyShardBeforeCloseAction(final Settings settings, final Tran } @Override - protected ReplicationResponse newResponseInstance() { - return new ReplicationResponse(); + protected ReplicationResponse newResponseInstance(StreamInput in) throws IOException { + return new ReplicationResponse(in); } @Override @@ -171,11 +173,6 @@ public String toString() { return "verify shard " + shardId + " before close with block " + clusterBlock; } - @Override - public void readFrom(final StreamInput in) { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(final StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexAction.java index cf7c7d08e0f72..4590302292497 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.create; import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable; public class CreateIndexAction extends ActionType<CreateIndexResponse> { @@ -28,11 +27,7 @@ public class CreateIndexAction extends ActionType<CreateIndexResponse> { public static final String NAME = "indices:admin/create"; private CreateIndexAction() { - super(NAME); + super(NAME,
CreateIndexResponse::new); } - @Override - public Writeable.Reader<CreateIndexResponse> getResponseReader() { - return CreateIndexResponse::new; - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java index cc90eb1c32e0a..177fafeac4f11 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java @@ -87,6 +87,24 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest> private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT; + public CreateIndexRequest(StreamInput in) throws IOException { + super(in); + cause = in.readString(); + index = in.readString(); + settings = readSettingsFromStream(in); + int size = in.readVInt(); + for (int i = 0; i < size; i++) { + final String type = in.readString(); + String source = in.readString(); + mappings.put(type, source); + } + int aliasesSize = in.readVInt(); + for (int i = 0; i < aliasesSize; i++) { + aliases.add(new Alias(in)); + } + waitForActiveShards = ActiveShardCount.readFrom(in); + } + public CreateIndexRequest() { } @@ -430,26 +448,6 @@ public CreateIndexRequest waitForActiveShards(final int waitForActiveShards) { return waitForActiveShards(ActiveShardCount.from(waitForActiveShards)); } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - cause = in.readString(); - index = in.readString(); - settings = readSettingsFromStream(in); - int size = in.readVInt(); - for (int i = 0; i < size; i++) { - final String type = in.readString(); - String source = in.readString(); - mappings.put(type, source); - } - int aliasesSize = in.readVInt(); - for (int i = 0; i < aliasesSize; i++) { - aliases.add(Alias.read(in)); - } - waitForActiveShards = ActiveShardCount.readFrom(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java index fdf8cc0c00c25..92d897e3f0348 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java @@ -47,8 +47,8 @@ public class TransportCreateIndexAction extends TransportMasterNodeAction<CreateIndexRequest, CreateIndexResponse> { @@ -29,11 +28,7 @@ public class DeleteIndexAction extends ActionType<AcknowledgedResponse> { public static final String NAME = "indices:admin/delete"; private DeleteIndexAction() { - super(NAME); + super(NAME, AcknowledgedResponse::new); } - @Override - public Writeable.Reader<AcknowledgedResponse> getResponseReader() { - return AcknowledgedResponse::new; - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java index 59300cfcecb3a..debda0c84d411 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java @@ -40,6 +40,12 @@ public class DeleteIndexRequest extends AcknowledgedRequest<DeleteIndexRequest> // Delete index should work by default on both open and closed indices.
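// The same refactoring recipe repeats for each request class above: the readFrom(StreamInput)
// override moves into a constructor that takes a StreamInput, so deserialization happens at
// construction time and Streamable's two-phase "new then readFrom" lifecycle disappears.
// A minimal sketch of that shape, with a hypothetical class name and fields (not code from
// this PR); only the constructor-vs-readFrom structure mirrors the real change.
import java.io.IOException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

final class ExampleState implements Writeable {
    private final String[] indices;
    private final boolean detailed;

    ExampleState(String[] indices, boolean detailed) {
        this.indices = indices;
        this.detailed = detailed;
    }

    // Replaces readFrom(); fields can now be final because they are assigned exactly once.
    ExampleState(StreamInput in) throws IOException {
        indices = in.readStringArray();
        detailed = in.readBoolean();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeStringArray(indices); // write order must mirror the read order above
        out.writeBoolean(detailed);
    }
}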
private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, true, true, false, false, true, false); + public DeleteIndexRequest(StreamInput in) throws IOException { + super(in); + indices = in.readStringArray(); + indicesOptions = IndicesOptions.readIndicesOptions(in); + } + public DeleteIndexRequest() { } @@ -94,13 +100,6 @@ public String[] indices() { return indices; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - indices = in.readStringArray(); - indicesOptions = IndicesOptions.readIndicesOptions(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java index 384452ab8c945..19341b63f172d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java @@ -56,8 +56,8 @@ public TransportDeleteIndexAction(TransportService transportService, ClusterServ MetaDataDeleteIndexService deleteIndexService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, DestructiveOperations destructiveOperations) { - super(DeleteIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, - DeleteIndexRequest::new); + super(DeleteIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, DeleteIndexRequest::new, + indexNameExpressionResolver ); this.deleteIndexService = deleteIndexService; this.destructiveOperations = destructiveOperations; } @@ -72,11 +72,6 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { return new AcknowledgedResponse(in); } - @Override - protected AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected void doExecute(Task task, DeleteIndexRequest request, ActionListener<AcknowledgedResponse> listener) { destructiveOperations.failDestructive(request.indices()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushAction.java index aab1dc775b99e..214af61a7cadb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushAction.java @@ -19,19 +19,14 @@ package org.elasticsearch.action.admin.indices.flush; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class FlushAction extends StreamableResponseActionType<FlushResponse> { +public class FlushAction extends ActionType<FlushResponse> { public static final FlushAction INSTANCE = new FlushAction(); public static final String NAME = "indices:admin/flush"; private FlushAction() { - super(NAME); - } - - @Override - public FlushResponse newResponse() { - return new FlushResponse(); + super(NAME, FlushResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java index a6a72b92ce75b..5b3fd9657213b 100644 ---
a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java @@ -107,11 +107,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(waitIfOngoing); } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public String toString() { return "FlushRequest{" + diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushResponse.java index c64abe619736e..e6632e59d9e97 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushResponse.java @@ -21,9 +21,11 @@ import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastResponse; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentParser; +import java.io.IOException; import java.util.Arrays; import java.util.List; @@ -43,8 +45,8 @@ public class FlushResponse extends BroadcastResponse { declareBroadcastFields(PARSER); } - FlushResponse() { - + FlushResponse(StreamInput in) throws IOException { + super(in); } FlushResponse(int totalShards, int successfulShards, int failedShards, List<DefaultShardOperationFailedException> shardFailures) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushRequest.java index 8bd3597eba9bc..67ba414fc7229 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushRequest.java @@ -46,11 +46,6 @@ FlushRequest getRequest() { return request; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushAction.java index 70153b767eef1..d5180c799ade7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushAction.java @@ -19,20 +19,15 @@ package org.elasticsearch.action.admin.indices.flush; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class SyncedFlushAction extends StreamableResponseActionType<SyncedFlushResponse> { +public class SyncedFlushAction extends ActionType<SyncedFlushResponse> { public static final SyncedFlushAction INSTANCE = new SyncedFlushAction(); public static final String NAME = "indices:admin/synced_flush"; private SyncedFlushAction() { - super(NAME); - } - - @Override - public SyncedFlushResponse newResponse() { - return new SyncedFlushResponse(); + super(NAME, SyncedFlushResponse::new); } } diff --git
a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequest.java index 2862ae10e0502..cb3333354b8e1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequest.java @@ -20,7 +20,9 @@ package org.elasticsearch.action.admin.indices.flush; import org.elasticsearch.action.support.broadcast.BroadcastRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import java.io.IOException; import java.util.Arrays; /** @@ -43,6 +45,9 @@ public SyncedFlushRequest(String... indices) { super(indices); } + public SyncedFlushRequest(StreamInput in) throws IOException { + super(in); + } @Override public String toString() { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushResponse.java index ed05ec096df2b..5e286b184fecc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushResponse.java @@ -22,7 +22,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -44,12 +44,8 @@ */ public class SyncedFlushResponse extends ActionResponse implements ToXContentFragment { - Map<String, List<ShardsSyncedFlushResult>> shardsResultPerIndex; - ShardCounts shardCounts; - - SyncedFlushResponse() { - - } + private final Map<String, List<ShardsSyncedFlushResult>> shardsResultPerIndex; + private final ShardCounts shardCounts; public SyncedFlushResponse(Map<String, List<ShardsSyncedFlushResult>> shardsResultPerIndex) { // shardsResultPerIndex is never modified after it is passed to this @@ -59,6 +55,23 @@ public SyncedFlushResponse(Map<String, List<ShardsSyncedFlushResult>> shardsResu this.shardCounts = calculateShardCounts(Iterables.flatten(shardsResultPerIndex.values())); } + public SyncedFlushResponse(StreamInput in) throws IOException { + super(in); + shardCounts = new ShardCounts(in); + Map<String, List<ShardsSyncedFlushResult>> tmpShardsResultPerIndex = new HashMap<>(); + int numShardsResults = in.readInt(); + for (int i =0 ; i< numShardsResults; i++) { + String index = in.readString(); + List<ShardsSyncedFlushResult> shardsSyncedFlushResults = new ArrayList<>(); + int numShards = in.readInt(); + for (int j =0; j< numShards; j++) { + shardsSyncedFlushResults.add(new ShardsSyncedFlushResult(in)); + } + tmpShardsResultPerIndex.put(index, shardsSyncedFlushResults); + } + shardsResultPerIndex = Collections.unmodifiableMap(tmpShardsResultPerIndex); + } + /** * total number shards, including replicas, both assigned and unassigned */ @@ -140,11 +153,11 @@ static ShardCounts calculateShardCounts(Iterable<ShardsSyncedFlushResult> result return new ShardCounts(total, successful, failed); } - static final class ShardCounts implements ToXContentFragment, Streamable { + static final class ShardCounts implements ToXContentFragment, Writeable { - public int total; - public int successful; - public int failed; + public final int total; + public final int successful; + public final int failed; ShardCounts(int total, int successful, int failed) { this.total =
total; @@ -152,8 +165,10 @@ static final class ShardCounts implements ToXContentFragment, Streamable { this.failed = failed; } - ShardCounts() { - + ShardCounts(StreamInput in) throws IOException { + total = in.readInt(); + successful = in.readInt(); + failed = in.readInt(); } @Override @@ -164,13 +179,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - @Override - public void readFrom(StreamInput in) throws IOException { - total = in.readInt(); - successful = in.readInt(); - failed = in.readInt(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeInt(total); @@ -190,25 +198,6 @@ static final class Fields { static final String REASON = "reason"; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - shardCounts = new ShardCounts(); - shardCounts.readFrom(in); - Map<String, List<ShardsSyncedFlushResult>> tmpShardsResultPerIndex = new HashMap<>(); - int numShardsResults = in.readInt(); - for (int i =0 ; i< numShardsResults; i++) { - String index = in.readString(); - List<ShardsSyncedFlushResult> shardsSyncedFlushResults = new ArrayList<>(); - int numShards = in.readInt(); - for (int j =0; j< numShards; j++) { - shardsSyncedFlushResults.add(ShardsSyncedFlushResult.readShardsSyncedFlushResult(in)); - } - tmpShardsResultPerIndex.put(index, shardsSyncedFlushResults); - } - shardsResultPerIndex = Collections.unmodifiableMap(tmpShardsResultPerIndex); - } - @Override public void writeTo(StreamOutput out) throws IOException { shardCounts.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java index 0f37acf0ad325..141df61a8dab0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; @@ -38,11 +39,10 @@ public class TransportFlushAction extends TransportBroadcastReplicationAction<FlushRequest, FlushResponse, ShardFlushRequest, ReplicationResponse> { @Inject - public TransportFlushAction(ClusterService clusterService, TransportService transportService, - ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - TransportShardFlushAction replicatedFlushAction) { - super(FlushAction.NAME, FlushRequest::new, clusterService, transportService, actionFilters, indexNameExpressionResolver, - replicatedFlushAction); + public TransportFlushAction(ClusterService clusterService, TransportService transportService, NodeClient client, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + super(FlushAction.NAME, FlushRequest::new, clusterService, transportService, client, actionFilters, indexNameExpressionResolver, + TransportShardFlushAction.TYPE); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java index
a07dee9613a1b..4b01329e8b4ff 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.admin.indices.flush; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportReplicationAction; @@ -27,16 +28,20 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.IOException; + public class TransportShardFlushAction extends TransportReplicationAction<ShardFlushRequest, ShardFlushRequest, ReplicationResponse> { public static final String NAME = FlushAction.NAME + "[s]"; + public static final ActionType<ReplicationResponse> TYPE = new ActionType<>(NAME, ReplicationResponse::new); @Inject public TransportShardFlushAction(Settings settings, TransportService transportService, ClusterService clusterService, @@ -47,8 +52,8 @@ public TransportShardFlushAction(Settings settings, TransportService transportSe } @Override - protected ReplicationResponse newResponseInstance() { - return new ReplicationResponse(); + protected ReplicationResponse newResponseInstance(StreamInput in) throws IOException { + return new ReplicationResponse(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportSyncedFlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportSyncedFlushAction.java index 6d393734608e3..3eb72e0b02277 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportSyncedFlushAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportSyncedFlushAction.java @@ -27,8 +27,6 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; -import java.util.function.Supplier; - /** * Synced flush Action.
*/ @@ -39,8 +37,7 @@ public class TransportSyncedFlushAction extends HandledTransportAction<SyncedFlushRequest, SyncedFlushResponse> - super(SyncedFlushAction.NAME, transportService, actionFilters, (Supplier<SyncedFlushRequest>) SyncedFlushRequest::new); + super(SyncedFlushAction.NAME, transportService, actionFilters, SyncedFlushRequest::new); this.syncedFlushService = syncedFlushService; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeAction.java index 98d8c61dad788..7319c5998b4c3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeAction.java @@ -19,19 +19,14 @@ package org.elasticsearch.action.admin.indices.forcemerge; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class ForceMergeAction extends StreamableResponseActionType<ForceMergeResponse> { +public class ForceMergeAction extends ActionType<ForceMergeResponse> { public static final ForceMergeAction INSTANCE = new ForceMergeAction(); public static final String NAME = "indices:admin/forcemerge"; private ForceMergeAction() { - super(NAME); - } - - @Override - public ForceMergeResponse newResponse() { - return new ForceMergeResponse(); + super(NAME, ForceMergeResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequest.java index 91f965d6268da..b7fa9094540a7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequest.java @@ -58,8 +58,11 @@ public ForceMergeRequest(String...
indices) { super(indices); } - public ForceMergeRequest() { - + public ForceMergeRequest(StreamInput in) throws IOException { + super(in); + maxNumSegments = in.readInt(); + onlyExpungeDeletes = in.readBoolean(); + flush = in.readBoolean(); } /** @@ -111,14 +114,6 @@ public ForceMergeRequest flush(boolean flush) { return this; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - maxNumSegments = in.readInt(); - onlyExpungeDeletes = in.readBoolean(); - flush = in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponse.java index 6ebbbbd34cd5b..a8860e61b9b5f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponse.java @@ -21,9 +21,11 @@ import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastResponse; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentParser; +import java.io.IOException; import java.util.Arrays; import java.util.List; @@ -43,7 +45,8 @@ public class ForceMergeResponse extends BroadcastResponse { declareBroadcastFields(PARSER); } - ForceMergeResponse() { + ForceMergeResponse(StreamInput in) throws IOException { + super(in); } ForceMergeResponse(int totalShards, int successfulShards, int failedShards, List<DefaultShardOperationFailedException> shardFailures) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java index a7d4fd8cf4c33..3affb65ffef95 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java @@ -69,9 +69,7 @@ protected ForceMergeResponse newResponse(ForceMergeRequest request, int totalSha @Override protected ForceMergeRequest readRequestFrom(StreamInput in) throws IOException { - final ForceMergeRequest request = new ForceMergeRequest(); - request.readFrom(in); - return request; + return new ForceMergeRequest(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexAction.java index c8a9eb85068d0..143defeb38eeb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexAction.java @@ -19,19 +19,14 @@ package org.elasticsearch.action.admin.indices.get; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class GetIndexAction extends StreamableResponseActionType<GetIndexResponse> { +public class GetIndexAction extends ActionType<GetIndexResponse> { public static final GetIndexAction INSTANCE = new GetIndexAction(); public static final String NAME = "indices:admin/get"; private GetIndexAction() { - super(NAME); - } - - @Override - public GetIndexResponse newResponse() { -
return new GetIndexResponse(); + super(NAME, GetIndexResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java index ed0ceba2b5cd5..8474eba44aca6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java @@ -139,11 +139,6 @@ public boolean includeDefaults() { return includeDefaults; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java index a8a0c61116582..f20c6c3c7da59 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java @@ -81,7 +81,50 @@ public GetIndexResponse(String[] indices, } } - GetIndexResponse() { + GetIndexResponse(StreamInput in) throws IOException { + super(in); + this.indices = in.readStringArray(); + + int mappingsSize = in.readVInt(); + ImmutableOpenMap.Builder<String, ImmutableOpenMap<String, MappingMetaData>> mappingsMapBuilder = ImmutableOpenMap.builder(); + for (int i = 0; i < mappingsSize; i++) { + String key = in.readString(); + int valueSize = in.readVInt(); + ImmutableOpenMap.Builder<String, MappingMetaData> mappingEntryBuilder = ImmutableOpenMap.builder(); + for (int j = 0; j < valueSize; j++) { + mappingEntryBuilder.put(in.readString(), new MappingMetaData(in)); + } + mappingsMapBuilder.put(key, mappingEntryBuilder.build()); + } + mappings = mappingsMapBuilder.build(); + + int aliasesSize = in.readVInt(); + ImmutableOpenMap.Builder<String, List<AliasMetaData>> aliasesMapBuilder = ImmutableOpenMap.builder(); + for (int i = 0; i < aliasesSize; i++) { + String key = in.readString(); + int valueSize = in.readVInt(); + List<AliasMetaData> aliasEntryBuilder = new ArrayList<>(valueSize); + for (int j = 0; j < valueSize; j++) { + aliasEntryBuilder.add(new AliasMetaData(in)); + } + aliasesMapBuilder.put(key, Collections.unmodifiableList(aliasEntryBuilder)); + } + aliases = aliasesMapBuilder.build(); + + int settingsSize = in.readVInt(); + ImmutableOpenMap.Builder<String, Settings> settingsMapBuilder = ImmutableOpenMap.builder(); + for (int i = 0; i < settingsSize; i++) { + String key = in.readString(); + settingsMapBuilder.put(key, Settings.readSettingsFromStream(in)); + } + settings = settingsMapBuilder.build(); + + ImmutableOpenMap.Builder<String, Settings> defaultSettingsMapBuilder = ImmutableOpenMap.builder(); + int defaultSettingsSize = in.readVInt(); + for (int i = 0; i < defaultSettingsSize; i++) { + defaultSettingsMapBuilder.put(in.readString(), Settings.readSettingsFromStream(in)); + } + defaultSettings = defaultSettingsMapBuilder.build(); } public String[] indices() { @@ -152,53 +195,6 @@ public String getSetting(String index, String setting) { } } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - this.indices = in.readStringArray(); - - int mappingsSize = in.readVInt(); - ImmutableOpenMap.Builder<String, ImmutableOpenMap<String, MappingMetaData>> mappingsMapBuilder = ImmutableOpenMap.builder(); - for (int i = 0; i < mappingsSize; i++) { - String key = in.readString(); - int valueSize = in.readVInt(); -
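// The GetIndexResponse constructor above shows the stream framing convention for nested maps:
// a vInt entry count, then each key followed by its (recursively framed) value. A compact
// hypothetical helper illustrating just that convention (readAliases is not a method in this
// PR; AliasMetaData and StreamInput are the real Elasticsearch types used above):
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.common.io.stream.StreamInput;

final class StreamFraming {
    static Map<String, List<AliasMetaData>> readAliases(StreamInput in) throws IOException {
        int indexCount = in.readVInt();                 // outer frame: number of entries
        Map<String, List<AliasMetaData>> result = new HashMap<>(indexCount);
        for (int i = 0; i < indexCount; i++) {
            String index = in.readString();             // key
            int aliasCount = in.readVInt();             // inner frame: list length
            List<AliasMetaData> aliases = new ArrayList<>(aliasCount);
            for (int j = 0; j < aliasCount; j++) {
                aliases.add(new AliasMetaData(in));     // each element is itself a Writeable
            }
            result.put(index, aliases);
        }
        return result;
    }
}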
ImmutableOpenMap.Builder<String, MappingMetaData> mappingEntryBuilder = ImmutableOpenMap.builder(); - for (int j = 0; j < valueSize; j++) { - mappingEntryBuilder.put(in.readString(), new MappingMetaData(in)); - } - mappingsMapBuilder.put(key, mappingEntryBuilder.build()); - } - mappings = mappingsMapBuilder.build(); - - int aliasesSize = in.readVInt(); - ImmutableOpenMap.Builder<String, List<AliasMetaData>> aliasesMapBuilder = ImmutableOpenMap.builder(); - for (int i = 0; i < aliasesSize; i++) { - String key = in.readString(); - int valueSize = in.readVInt(); - List<AliasMetaData> aliasEntryBuilder = new ArrayList<>(valueSize); - for (int j = 0; j < valueSize; j++) { - aliasEntryBuilder.add(new AliasMetaData(in)); - } - aliasesMapBuilder.put(key, Collections.unmodifiableList(aliasEntryBuilder)); - } - aliases = aliasesMapBuilder.build(); - - int settingsSize = in.readVInt(); - ImmutableOpenMap.Builder<String, Settings> settingsMapBuilder = ImmutableOpenMap.builder(); - for (int i = 0; i < settingsSize; i++) { - String key = in.readString(); - settingsMapBuilder.put(key, Settings.readSettingsFromStream(in)); - } - settings = settingsMapBuilder.build(); - - ImmutableOpenMap.Builder<String, Settings> defaultSettingsMapBuilder = ImmutableOpenMap.builder(); - int defaultSettingsSize = in.readVInt(); - for (int i = 0; i < defaultSettingsSize; i++) { - defaultSettingsMapBuilder.put(in.readString(), Settings.readSettingsFromStream(in)); - } - defaultSettings = defaultSettingsMapBuilder.build(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeStringArray(indices); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java index 8f32fcb155112..6fb6da59735ef 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java @@ -34,6 +34,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; @@ -78,8 +79,8 @@ protected ClusterBlockException checkBlock(GetIndexRequest request, ClusterState } @Override - protected GetIndexResponse newResponse() { - return new GetIndexResponse(); + protected GetIndexResponse read(StreamInput in) throws IOException { + return new GetIndexResponse(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsAction.java index a16c6d4c532f4..22cb9c859233d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.mapping.get; import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable; public class GetFieldMappingsAction extends ActionType<GetFieldMappingsResponse> { @@ -28,11 +27,6 @@ public class GetFieldMappingsAction extends ActionType<GetFieldMappingsResponse> public static final String NAME = "indices:admin/mappings/fields/get"; private GetFieldMappingsAction() { -
super(NAME); + super(NAME, GetFieldMappingsResponse::new); } - @Override - public Writeable.Reader<GetFieldMappingsResponse> getResponseReader() { - return GetFieldMappingsResponse::new; - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java index dde7c13e2c01a..7442fe2dd3597 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java @@ -48,8 +48,16 @@ public class GetFieldMappingsRequest extends ActionRequest implements IndicesReq private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen(); - public GetFieldMappingsRequest() { + public GetFieldMappingsRequest() {} + public GetFieldMappingsRequest(StreamInput in) throws IOException { + super(in); + indices = in.readStringArray(); + types = in.readStringArray(); + indicesOptions = IndicesOptions.readIndicesOptions(in); + local = in.readBoolean(); + fields = in.readStringArray(); + includeDefaults = in.readBoolean(); } /** @@ -130,15 +138,4 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringArray(fields); out.writeBoolean(includeDefaults); } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - indices = in.readStringArray(); - types = in.readStringArray(); - indicesOptions = IndicesOptions.readIndicesOptions(in); - local = in.readBoolean(); - fields = in.readStringArray(); - includeDefaults = in.readBoolean(); - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java index e984e214e6fee..28ff148c5e2c7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java @@ -43,7 +43,6 @@ import java.util.Map; import java.util.Objects; -import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; @@ -86,16 +85,12 @@ public class GetFieldMappingsResponse extends ActionResponse implements ToXConte }, MAPPINGS, ObjectParser.ValueType.OBJECT); } - private Map<String, Map<String, Map<String, FieldMappingMetaData>>> mappings = emptyMap(); + private final Map<String, Map<String, Map<String, FieldMappingMetaData>>> mappings; GetFieldMappingsResponse(Map<String, Map<String, Map<String, FieldMappingMetaData>>> mappings) { this.mappings = mappings; } - - GetFieldMappingsResponse() { - } - GetFieldMappingsResponse(StreamInput in) throws IOException { super(in); int size = in.readVInt(); @@ -291,11 +286,6 @@ public int hashCode() { } } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(mappings.size()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsAction.java index 332fe97bca6e2..96f6fe9962ff7 100644 ---
a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsAction.java @@ -19,19 +19,14 @@ package org.elasticsearch.action.admin.indices.mapping.get; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class GetMappingsAction extends StreamableResponseActionType<GetMappingsResponse> { +public class GetMappingsAction extends ActionType<GetMappingsResponse> { public static final GetMappingsAction INSTANCE = new GetMappingsAction(); public static final String NAME = "indices:admin/mappings/get"; private GetMappingsAction() { - super(NAME); - } - - @Override - public GetMappingsResponse newResponse() { - return new GetMappingsResponse(); + super(NAME, GetMappingsResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java index 73a04e466300f..67bc872685211 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java @@ -47,20 +47,8 @@ public GetMappingsResponse(ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings) { - public ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings() { - return mappings; - } - - public ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> getMappings() { - return mappings(); - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); + GetMappingsResponse(StreamInput in) throws IOException { + super(in); int size = in.readVInt(); ImmutableOpenMap.Builder<String, ImmutableOpenMap<String, MappingMetaData>> indexMapBuilder = ImmutableOpenMap.builder(); for (int i = 0; i < size; i++) { @@ -75,6 +63,14 @@ public void readFrom(StreamInput in) throws IOException { mappings = indexMapBuilder.build(); } + public ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings() { + return mappings; + } + + public ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> getMappings() { + return mappings(); + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(mappings.size()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java index 97c6493dfd950..c95435834b499 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java @@ -35,6 +35,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReferenceArray; +import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; public class TransportGetFieldMappingsAction extends HandledTransportAction<GetFieldMappingsRequest, GetFieldMappingsResponse> { @@ -62,7 +63,7 @@ protected void doExecute(Task task, GetFieldMappingsRequest request, final Actio final AtomicReferenceArray<Object> indexResponses = new AtomicReferenceArray<>(concreteIndices.length); if (concreteIndices.length == 0) { - listener.onResponse(new GetFieldMappingsResponse()); + listener.onResponse(new GetFieldMappingsResponse(emptyMap())); } else { boolean probablySingleFieldRequest = concreteIndices.length == 1 && request.types().length == 1 && request.fields().length == 1; for (final String index : concreteIndices) { diff --git
a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java index e81785266bb26..0dda5fc8cfe16 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java @@ -30,6 +30,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -62,8 +63,8 @@ protected ClusterBlockException checkBlock(GetMappingsRequest request, ClusterSt } @Override - protected GetMappingsResponse newResponse() { - return new GetMappingsResponse(); + protected GetMappingsResponse read(StreamInput in) throws IOException { + return new GetMappingsResponse(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingAction.java index 97d908f01eec1..3fab105e42976 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingAction.java @@ -21,7 +21,6 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.common.io.stream.Writeable; public class PutMappingAction extends ActionType<AcknowledgedResponse> { @@ -29,11 +28,7 @@ public class PutMappingAction extends ActionType<AcknowledgedResponse> { public static final String NAME = "indices:admin/mapping/put"; private PutMappingAction() { - super(NAME); + super(NAME, AcknowledgedResponse::new); } - @Override - public Writeable.Reader<AcknowledgedResponse> getResponseReader() { - return AcknowledgedResponse::new; - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java index cd88947482747..a26a8c0b32fe8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -76,6 +76,16 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im private Index concreteIndex; + public PutMappingRequest(StreamInput in) throws IOException { + super(in); + indices = in.readStringArray(); + indicesOptions = IndicesOptions.readIndicesOptions(in); + type = in.readOptionalString(); + source = in.readString(); + concreteIndex = in.readOptionalWriteable(Index::new); + origin = in.readOptionalString(); + } + public PutMappingRequest() { } @@ -299,17 +309,6 @@ public PutMappingRequest source(BytesReference mappingSource, XContentType xCont } } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - indices = in.readStringArray(); - indicesOptions = IndicesOptions.readIndicesOptions(in); - type = in.readOptionalString(); - source = in.readString(); - concreteIndex = in.readOptionalWriteable(Index::new); - origin = in.readOptionalString(); -
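// Every *Action class in this diff shrinks the same way: ActionType's constructor now accepts
// the Writeable.Reader for the response, so the newResponse()/getResponseReader() overrides go
// away and registration becomes a one-liner. A sketch with a hypothetical action name (only the
// super(NAME, Reader) shape mirrors the real change):
import org.elasticsearch.action.ActionType;
import org.elasticsearch.action.support.master.AcknowledgedResponse;

final class ExampleAction extends ActionType<AcknowledgedResponse> {
    static final ExampleAction INSTANCE = new ExampleAction();
    static final String NAME = "indices:admin/example"; // hypothetical action name

    private ExampleAction() {
        // The reader reference is how transport code later materializes the response
        // from a StreamInput, replacing StreamableResponseActionType.newResponse().
        super(NAME, AcknowledgedResponse::new);
    }
}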
@Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java index f2a21858f5330..41d807a0cf48c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java @@ -61,8 +61,8 @@ public TransportPutMappingAction( final ActionFilters actionFilters, final IndexNameExpressionResolver indexNameExpressionResolver, final RequestValidators<PutMappingRequest> requestValidators) { - super(PutMappingAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, - PutMappingRequest::new); + super(PutMappingAction.NAME, transportService, clusterService, threadPool, actionFilters, PutMappingRequest::new, + indexNameExpressionResolver); this.metaDataMappingService = metaDataMappingService; this.requestValidators = Objects.requireNonNull(requestValidators); } @@ -78,11 +78,6 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { return new AcknowledgedResponse(in); } - @Override - protected AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected ClusterBlockException checkBlock(PutMappingRequest request, ClusterState state) { String[] indices; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexAction.java index 3dd871e97604c..b2798a0ecc98d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.open; import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable; public class OpenIndexAction extends ActionType<OpenIndexResponse> { @@ -28,11 +27,7 @@ public class OpenIndexAction extends ActionType<OpenIndexResponse> { public static final String NAME = "indices:admin/open"; private OpenIndexAction() { - super(NAME); + super(NAME, OpenIndexResponse::new); } - @Override - public Writeable.Reader<OpenIndexResponse> getResponseReader() { - return OpenIndexResponse::new; - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexRequest.java index 1afb9ccc67d5c..f506ef5b2ae0b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexRequest.java @@ -41,6 +41,13 @@ public class OpenIndexRequest extends AcknowledgedRequest<OpenIndexRequest> impl private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, false, true); private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT; + public OpenIndexRequest(StreamInput in) throws IOException { + super(in); + indices = in.readStringArray(); + indicesOptions = IndicesOptions.readIndicesOptions(in); + waitForActiveShards = ActiveShardCount.readFrom(in); + } + public OpenIndexRequest() { } @@ -135,14 +142,6 @@ public OpenIndexRequest waitForActiveShards(final int
waitForActiveShards) { return waitForActiveShards(ActiveShardCount.from(waitForActiveShards)); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - indices = in.readStringArray(); - indicesOptions = IndicesOptions.readIndicesOptions(in); - waitForActiveShards = ActiveShardCount.readFrom(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java index c815a6b1132d7..53c015596049c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java @@ -53,8 +53,8 @@ public TransportOpenIndexAction(TransportService transportService, ClusterServic ThreadPool threadPool, MetaDataIndexStateService indexStateService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, DestructiveOperations destructiveOperations) { - super(OpenIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, - OpenIndexRequest::new); + super(OpenIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, OpenIndexRequest::new, + indexNameExpressionResolver); this.indexStateService = indexStateService; this.destructiveOperations = destructiveOperations; } @@ -65,11 +65,6 @@ protected String executor() { return ThreadPool.Names.SAME; } - @Override - protected OpenIndexResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected OpenIndexResponse read(StreamInput in) throws IOException { return new OpenIndexResponse(in); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryAction.java index e41a766aa4b08..84f166ed6616e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryAction.java @@ -19,22 +19,17 @@ package org.elasticsearch.action.admin.indices.recovery; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * Recovery information action */ -public class RecoveryAction extends StreamableResponseActionType<RecoveryResponse> { +public class RecoveryAction extends ActionType<RecoveryResponse> { public static final RecoveryAction INSTANCE = new RecoveryAction(); public static final String NAME = "indices:monitor/recovery"; private RecoveryAction() { - super(NAME); - } - - @Override - public RecoveryResponse newResponse() { - return new RecoveryResponse(); + super(NAME, RecoveryResponse::new); } } \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryRequest.java index 78d2969c2fde4..dca11b0d99acd 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryRequest.java @@ -42,6 +42,12 @@ public RecoveryRequest() { this(Strings.EMPTY_ARRAY); } + public
RecoveryRequest(StreamInput in) throws IOException { + super(in); + detailed = in.readBoolean(); + activeOnly = in.readBoolean(); + } + /** * Constructs a request for recovery information for all shards for the given indices * @@ -95,11 +101,4 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(detailed); out.writeBoolean(activeOnly); } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - detailed = in.readBoolean(); - activeOnly = in.readBoolean(); - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java index 7c51edc4d957e..c07071735e47e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java @@ -40,7 +40,19 @@ public class RecoveryResponse extends BroadcastResponse { private Map> shardRecoveryStates = new HashMap<>(); - public RecoveryResponse() { } + public RecoveryResponse(StreamInput in) throws IOException { + super(in); + int size = in.readVInt(); + for (int i = 0; i < size; i++) { + String s = in.readString(); + int listSize = in.readVInt(); + List list = new ArrayList<>(listSize); + for (int j = 0; j < listSize; j++) { + list.add(RecoveryState.readRecoveryState(in)); + } + shardRecoveryStates.put(s, list); + } + } /** * Constructs recovery information for a collection of indices and associated shards. Keeps track of how many total shards @@ -103,21 +115,6 @@ public void writeTo(StreamOutput out) throws IOException { } } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - int size = in.readVInt(); - for (int i = 0; i < size; i++) { - String s = in.readString(); - int listSize = in.readVInt(); - List list = new ArrayList<>(listSize); - for (int j = 0; j < listSize; j++) { - list.add(RecoveryState.readRecoveryState(in)); - } - shardRecoveryStates.put(s, list); - } - } - @Override public String toString() { return Strings.toString(this, true, true); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java index 0ff31f42b9295..f948b6dd94a63 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java @@ -93,9 +93,7 @@ protected RecoveryResponse newResponse(RecoveryRequest request, int totalShards, @Override protected RecoveryRequest readRequestFrom(StreamInput in) throws IOException { - final RecoveryRequest recoveryRequest = new RecoveryRequest(); - recoveryRequest.readFrom(in); - return recoveryRequest; + return new RecoveryRequest(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshAction.java index f26d899371ecb..27e27c71b3aea 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshAction.java @@ -19,19 +19,14 @@ package org.elasticsearch.action.admin.indices.refresh; -import 
org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class RefreshAction extends StreamableResponseActionType { +public class RefreshAction extends ActionType { public static final RefreshAction INSTANCE = new RefreshAction(); public static final String NAME = "indices:admin/refresh"; private RefreshAction() { - super(NAME); - } - - @Override - public RefreshResponse newResponse() { - return new RefreshResponse(); + super(NAME, RefreshResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java index 20165d078c5c9..b220db2d21ed6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java @@ -21,9 +21,11 @@ import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastResponse; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentParser; +import java.io.IOException; import java.util.Arrays; import java.util.List; @@ -43,7 +45,8 @@ public class RefreshResponse extends BroadcastResponse { declareBroadcastFields(PARSER); } - RefreshResponse() { + RefreshResponse(StreamInput in) throws IOException { + super(in); } RefreshResponse(int totalShards, int successfulShards, int failedShards, List shardFailures) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java index 051c1c7b3ca26..72abe9f548141 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.support.replication.BasicReplicationRequest; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; @@ -41,10 +42,9 @@ public class TransportRefreshAction @Inject public TransportRefreshAction(ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, - TransportShardRefreshAction shardRefreshAction) { - super(RefreshAction.NAME, RefreshRequest::new, clusterService, transportService, actionFilters, - indexNameExpressionResolver, shardRefreshAction); + IndexNameExpressionResolver indexNameExpressionResolver, NodeClient client) { + super(RefreshAction.NAME, RefreshRequest::new, clusterService, transportService, client, actionFilters, + indexNameExpressionResolver, TransportShardRefreshAction.TYPE); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java index 
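
Illustrative aside from the editor, not part of the patch: the RefreshAction hunk above is the template applied to every action converted in this change set. A StreamableResponseActionType subclass that overrode newResponse() collapses into a plain ActionType whose constructor receives a Writeable.Reader for the response. A minimal sketch of the resulting shape, using hypothetical MyAction/MyResponse names and assuming MyResponse has a MyResponse(StreamInput) constructor:

    import org.elasticsearch.action.ActionType;

    public class MyAction extends ActionType<MyResponse> {

        public static final MyAction INSTANCE = new MyAction();
        public static final String NAME = "indices:admin/my_action";

        private MyAction() {
            // the reader reference replaces the removed newResponse()/getResponseReader()
            // overrides; the transport layer invokes it to deserialize responses
            super(NAME, MyResponse::new);
        }
    }

The reader reference is what the transport layer uses to materialize responses off the wire, which is why every converted response class in this diff gains a StreamInput constructor.
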
c0a52ac8c0d6a..dd981aa995992 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.admin.indices.refresh; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.replication.BasicReplicationRequest; import org.elasticsearch.action.support.replication.ReplicationResponse; @@ -28,17 +29,21 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.IOException; + public class TransportShardRefreshAction extends TransportReplicationAction { public static final String NAME = RefreshAction.NAME + "[s]"; + public static final ActionType TYPE = new ActionType<>(NAME, ReplicationResponse::new); @Inject public TransportShardRefreshAction(Settings settings, TransportService transportService, ClusterService clusterService, @@ -49,8 +54,8 @@ public TransportShardRefreshAction(Settings settings, TransportService transport } @Override - protected ReplicationResponse newResponseInstance() { - return new ReplicationResponse(); + protected ReplicationResponse newResponseInstance(StreamInput in) throws IOException { + return new ReplicationResponse(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverAction.java index d0893d35a6754..560a29251eb28 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.rollover; import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable; public class RolloverAction extends ActionType { @@ -28,11 +27,7 @@ public class RolloverAction extends ActionType { public static final String NAME = "indices:admin/rollover"; private RolloverAction() { - super(NAME); + super(NAME, RolloverResponse::new); } - @Override - public Writeable.Reader getResponseReader() { - return RolloverResponse::new; - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java index 082b81c1ccf51..65c8432351d1e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java @@ -96,6 +96,19 @@ public class RolloverRequest extends AcknowledgedRequest implem //the index name "_na_" is never read back, what matters are settings, mappings and aliases private CreateIndexRequest createIndexRequest = new CreateIndexRequest("_na_"); + public RolloverRequest(StreamInput in) throws IOException { 
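        // Illustrative commentary from the editor, not part of the patch: this constructor is the
        // new read path. Everything the removed readFrom(StreamInput) below used to assign is read
        // here instead, so fields are populated at construction time. Two invariants hold for each
        // of these constructors: super(in) runs first so the superclass consumes its slice of the
        // stream, and the field reads must mirror writeTo(StreamOutput) exactly, otherwise every
        // later field is decoded from the wrong stream offset.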
+ super(in); + alias = in.readString(); + newIndexName = in.readOptionalString(); + dryRun = in.readBoolean(); + int size = in.readVInt(); + for (int i = 0; i < size; i++) { + Condition condition = in.readNamedWriteable(Condition.class); + this.conditions.put(condition.name, condition); + } + createIndexRequest = new CreateIndexRequest(in); + } + RolloverRequest() {} public RolloverRequest(String alias, String newIndexName) { @@ -112,21 +125,6 @@ public ActionRequestValidationException validate() { return validationException; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - alias = in.readString(); - newIndexName = in.readOptionalString(); - dryRun = in.readBoolean(); - int size = in.readVInt(); - for (int i = 0; i < size; i++) { - Condition condition = in.readNamedWriteable(Condition.class); - this.conditions.put(condition.name, condition); - } - createIndexRequest = new CreateIndexRequest(); - createIndexRequest.readFrom(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index 60f099cb1d034..a2ecc9e678b6d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -80,8 +80,8 @@ public TransportRolloverAction(TransportService transportService, ClusterService ThreadPool threadPool, MetaDataCreateIndexService createIndexService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, MetaDataIndexAliasesService indexAliasesService, Client client) { - super(RolloverAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, - RolloverRequest::new); + super(RolloverAction.NAME, transportService, clusterService, threadPool, actionFilters, RolloverRequest::new, + indexNameExpressionResolver); this.createIndexService = createIndexService; this.indexAliasesService = indexAliasesService; this.client = client; @@ -94,11 +94,6 @@ protected String executor() { return ThreadPool.Names.SAME; } - @Override - protected RolloverResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected RolloverResponse read(StreamInput in) throws IOException { return new RolloverResponse(in); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java index cc68a4a7e34b7..973a160c8a0b4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java @@ -48,8 +48,12 @@ public class IndicesSegmentResponse extends BroadcastResponse { private Map indicesSegments; - IndicesSegmentResponse() { - + IndicesSegmentResponse(StreamInput in) throws IOException { + super(in); + shards = new ShardSegments[in.readVInt()]; + for (int i = 0; i < shards.length; i++) { + shards[i] = new ShardSegments(in); + } } IndicesSegmentResponse(ShardSegments[] shards, int totalShards, int successfulShards, int failedShards, @@ -82,16 +86,6 @@ 
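
Illustrative aside from the editor, not part of the patch: the RolloverRequest constructor above also covers the polymorphic case. Condition subclasses travel as NamedWriteables, so the stream carries a count followed by a registered name plus payload for each entry. A hedged sketch of the matching write and read sides, with names taken from the hunk above:

    // write side: count first, then each condition under its registered name
    out.writeVInt(conditions.size());
    for (Condition condition : conditions.values()) {
        out.writeNamedWriteable(condition);
    }

    // read side: same order; the NamedWriteableRegistry resolves the concrete subclass
    int size = in.readVInt();
    for (int i = 0; i < size; i++) {
        Condition condition = in.readNamedWriteable(Condition.class);
        this.conditions.put(condition.name, condition);
    }
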
public Map getIndices() { return indicesSegments; } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - shards = new ShardSegments[in.readVInt()]; - for (int i = 0; i < shards.length; i++) { - shards[i] = ShardSegments.readShardSegments(in); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsAction.java index edc9df6e91207..7b169e672b281 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsAction.java @@ -19,19 +19,14 @@ package org.elasticsearch.action.admin.indices.segments; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class IndicesSegmentsAction extends StreamableResponseActionType { +public class IndicesSegmentsAction extends ActionType { public static final IndicesSegmentsAction INSTANCE = new IndicesSegmentsAction(); public static final String NAME = "indices:monitor/segments"; private IndicesSegmentsAction() { - super(NAME); - } - - @Override - public IndicesSegmentResponse newResponse() { - return new IndicesSegmentResponse(); + super(NAME, IndicesSegmentResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequest.java index 570fa89e0262b..8c4e9e3bb451a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequest.java @@ -34,6 +34,11 @@ public IndicesSegmentsRequest() { this(Strings.EMPTY_ARRAY); } + public IndicesSegmentsRequest(StreamInput in) throws IOException { + super(in); + verbose = in.readBoolean(); + } + public IndicesSegmentsRequest(String... 
indices) { super(indices); } @@ -60,11 +65,4 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(verbose); } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - verbose = in.readBoolean(); - } - } \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/ShardSegments.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/ShardSegments.java index 1052740248db4..329f39bb1b4f8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/ShardSegments.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/ShardSegments.java @@ -22,7 +22,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.index.engine.Segment; import java.io.IOException; @@ -31,20 +31,30 @@ import java.util.Iterator; import java.util.List; -public class ShardSegments implements Streamable, Iterable { +public class ShardSegments implements Writeable, Iterable { private ShardRouting shardRouting; private List segments; - ShardSegments() { - } - ShardSegments(ShardRouting shardRouting, List segments) { this.shardRouting = shardRouting; this.segments = segments; } + ShardSegments(StreamInput in) throws IOException { + shardRouting = new ShardRouting(in); + int size = in.readVInt(); + if (size == 0) { + segments = Collections.emptyList(); + } else { + segments = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + segments.add(new Segment(in)); + } + } + } + @Override public Iterator iterator() { return segments.iterator(); @@ -78,26 +88,6 @@ public int getNumberOfSearch() { return count; } - public static ShardSegments readShardSegments(StreamInput in) throws IOException { - ShardSegments shard = new ShardSegments(); - shard.readFrom(in); - return shard; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - shardRouting = new ShardRouting(in); - int size = in.readVInt(); - if (size == 0) { - segments = Collections.emptyList(); - } else { - segments = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - segments.add(Segment.readSegment(in)); - } - } - } - @Override public void writeTo(StreamOutput out) throws IOException { shardRouting.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java index ca698b655ebbf..434ac2d6b61ce 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java @@ -74,7 +74,7 @@ protected ClusterBlockException checkRequestBlock(ClusterState state, IndicesSeg @Override protected ShardSegments readShardResult(StreamInput in) throws IOException { - return ShardSegments.readShardSegments(in); + return new ShardSegments(in); } @Override @@ -87,9 +87,7 @@ protected IndicesSegmentResponse newResponse(IndicesSegmentsRequest request, int @Override protected IndicesSegmentsRequest readRequestFrom(StreamInput in) throws IOException { - final IndicesSegmentsRequest request = new 
IndicesSegmentsRequest(); - request.readFrom(in); - return request; + return new IndicesSegmentsRequest(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsAction.java index 6e17be7241745..ffbc6e6777525 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsAction.java @@ -19,19 +19,14 @@ package org.elasticsearch.action.admin.indices.settings.get; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class GetSettingsAction extends StreamableResponseActionType { +public class GetSettingsAction extends ActionType { public static final GetSettingsAction INSTANCE = new GetSettingsAction(); public static final String NAME = "indices:monitor/settings/get"; public GetSettingsAction() { - super(NAME); - } - - @Override - public GetSettingsResponse newResponse() { - return new GetSettingsResponse(); + super(NAME, GetSettingsResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequest.java index dfcd0e33b3f43..50835986c46eb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequest.java @@ -124,11 +124,6 @@ public ActionRequestValidationException validate() { return validationException; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java index 8d77fe39d65cf..87f09e05f80c0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java @@ -50,7 +50,21 @@ public GetSettingsResponse(ImmutableOpenMap indexToSettings, this.indexToDefaultSettings = indexToDefaultSettings; } - GetSettingsResponse() { + public GetSettingsResponse(StreamInput in) throws IOException { + super(in); + + int settingsSize = in.readVInt(); + ImmutableOpenMap.Builder settingsBuilder = ImmutableOpenMap.builder(); + for (int i = 0; i < settingsSize; i++) { + settingsBuilder.put(in.readString(), Settings.readSettingsFromStream(in)); + } + ImmutableOpenMap.Builder defaultSettingsBuilder = ImmutableOpenMap.builder(); + int defaultSettingsSize = in.readVInt(); + for (int i = 0; i < defaultSettingsSize; i++) { + defaultSettingsBuilder.put(in.readString(), Settings.readSettingsFromStream(in)); + } + indexToSettings = settingsBuilder.build(); + indexToDefaultSettings = defaultSettingsBuilder.build(); } /** @@ -99,24 +113,6 @@ public String getSetting(String index, String setting) { } } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - - int settingsSize = in.readVInt(); - 
ImmutableOpenMap.Builder settingsBuilder = ImmutableOpenMap.builder(); - for (int i = 0; i < settingsSize; i++) { - settingsBuilder.put(in.readString(), Settings.readSettingsFromStream(in)); - } - ImmutableOpenMap.Builder defaultSettingsBuilder = ImmutableOpenMap.builder(); - int defaultSettingsSize = in.readVInt(); - for (int i = 0; i < defaultSettingsSize; i++) { - defaultSettingsBuilder.put(in.readString(), Settings.readSettingsFromStream(in)); - } - indexToSettings = settingsBuilder.build(); - indexToDefaultSettings = defaultSettingsBuilder.build(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(indexToSettings.size()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/TransportGetSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/TransportGetSettingsAction.java index fd0004beba292..112781d77ba51 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/TransportGetSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/TransportGetSettingsAction.java @@ -30,6 +30,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; @@ -40,6 +41,8 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.IOException; + public class TransportGetSettingsAction extends TransportMasterNodeReadAction { @@ -71,8 +74,8 @@ protected ClusterBlockException checkBlock(GetSettingsRequest request, ClusterSt @Override - protected GetSettingsResponse newResponse() { - return new GetSettingsResponse(); + protected GetSettingsResponse read(StreamInput in) throws IOException { + return new GetSettingsResponse(in); } private static boolean isFilteredRequest(GetSettingsRequest request) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java index c3192631e2cf5..7f69e8bc05bd6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java @@ -49,8 +49,8 @@ public class TransportUpdateSettingsAction extends TransportMasterNodeAction listener) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsAction.java index df9b5137bb3d1..45b87c9af0576 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsAction.java @@ -21,7 +21,6 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.common.io.stream.Writeable; public class UpdateSettingsAction extends ActionType { @@ -29,11 +28,7 @@ public class UpdateSettingsAction extends ActionType { 
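
Illustrative aside from the editor, not part of the patch: GetSettingsResponse above shows the invariant behind all of these conversions: the new StreamInput constructor must replay exactly the byte sequence that writeTo(StreamOutput) emits, here a VInt count followed by key/value pairs. Sketched side by side (writeSettingsToStream is assumed to be the existing write-side counterpart of readSettingsFromStream):

    // write side: size, then alternating keys and Settings payloads
    out.writeVInt(indexToSettings.size());
    for (ObjectObjectCursor<String, Settings> cursor : indexToSettings) {
        out.writeString(cursor.key);
        Settings.writeSettingsToStream(cursor.value, out);
    }

    // read side: identical order and element types
    int settingsSize = in.readVInt();
    ImmutableOpenMap.Builder<String, Settings> settingsBuilder = ImmutableOpenMap.builder();
    for (int i = 0; i < settingsSize; i++) {
        settingsBuilder.put(in.readString(), Settings.readSettingsFromStream(in));
    }
    indexToSettings = settingsBuilder.build();
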
public static final String NAME = "indices:admin/settings/update"; private UpdateSettingsAction() { - super(NAME); + super(NAME, AcknowledgedResponse::new); } - @Override - public Writeable.Reader getResponseReader() { - return AcknowledgedResponse::new; - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java index e7b5d82207410..7a9c6db04bc0d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java @@ -56,6 +56,14 @@ public class UpdateSettingsRequest extends AcknowledgedRequest source) { return this; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - indices = in.readStringArray(); - indicesOptions = IndicesOptions.readIndicesOptions(in); - settings = readSettingsFromStream(in); - preserveExisting = in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresAction.java index b06145bff0931..70ec32ed80a91 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresAction.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.admin.indices.shards; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * ActionType for {@link TransportIndicesShardStoresAction} @@ -28,17 +28,12 @@ * Shard store information reports which nodes hold shard copies, how recent they are * and any exceptions on opening the shard index or from previous engine failures */ -public class IndicesShardStoresAction extends StreamableResponseActionType { +public class IndicesShardStoresAction extends ActionType { public static final IndicesShardStoresAction INSTANCE = new IndicesShardStoresAction(); public static final String NAME = "indices:monitor/shard_stores"; private IndicesShardStoresAction() { - super(NAME); - } - - @Override - public IndicesShardStoresResponse newResponse() { - return new IndicesShardStoresResponse(); + super(NAME, IndicesShardStoresResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresRequest.java index 18e4083095df5..45a9c7283d0ab 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresRequest.java @@ -128,9 +128,4 @@ public IndicesOptions indicesOptions() { public ActionRequestValidationException validate() { return null; } - - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java 
b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java index ef16286371b26..2b17f6418167a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java @@ -21,8 +21,8 @@ import com.carrotsearch.hppc.cursors.IntObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -30,7 +30,7 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -39,8 +39,6 @@ import java.util.Collections; import java.util.List; -import static org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse.StoreStatus.readStoreStatus; - /** * Response for {@link IndicesShardStoresAction} * @@ -52,7 +50,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon /** * Shard store information from a node */ - public static class StoreStatus implements Streamable, ToXContentFragment, Comparable { + public static class StoreStatus implements Writeable, ToXContentFragment, Comparable { private DiscoveryNode node; private String allocationId; private Exception storeException; @@ -111,7 +109,13 @@ private void writeTo(StreamOutput out) throws IOException { } } - private StoreStatus() { + public StoreStatus(StreamInput in) throws IOException { + node = new DiscoveryNode(in); + allocationId = in.readOptionalString(); + allocationStatus = AllocationStatus.readFrom(in); + if (in.readBoolean()) { + storeException = in.readException(); + } } public StoreStatus(DiscoveryNode node, String allocationId, AllocationStatus allocationStatus, Exception storeException) { @@ -154,22 +158,6 @@ public AllocationStatus getAllocationStatus() { return allocationStatus; } - public static StoreStatus readStoreStatus(StreamInput in) throws IOException { - StoreStatus storeStatus = new StoreStatus(); - storeStatus.readFrom(in); - return storeStatus; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - node = new DiscoveryNode(in); - allocationId = in.readOptionalString(); - allocationStatus = AllocationStatus.readFrom(in); - if (in.readBoolean()) { - storeException = in.readException(); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { node.writeTo(out); @@ -232,7 +220,14 @@ public Failure(String nodeId, String index, int shardId, Throwable reason) { this.nodeId = nodeId; } - private Failure() { + private Failure(StreamInput in) throws IOException { + if (in.getVersion().before(Version.V_7_4_0)) { + nodeId = in.readString(); + } + readFrom(in, this); + if (in.getVersion().onOrAfter(Version.V_7_4_0)) { + nodeId = in.readString(); + } } public String nodeId() { @@ -240,21 +235,18 @@ public String nodeId() { } static Failure readFailure(StreamInput in) throws IOException { - Failure failure = new Failure(); - failure.readFrom(in); - return failure; - 
} - - @Override - public void readFrom(StreamInput in) throws IOException { - nodeId = in.readString(); - super.readFrom(in); + return new Failure(in); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeString(nodeId); + if (out.getVersion().before(Version.V_7_4_0)) { + out.writeString(nodeId); + } super.writeTo(out); + if (out.getVersion().onOrAfter(Version.V_7_4_0)) { + out.writeString(nodeId); + } } @Override @@ -278,28 +270,7 @@ public IndicesShardStoresResponse(ImmutableOpenMap>> getStoreStatuses() { - return storeStatuses; - } - - /** - * Returns node {@link Failure}s encountered - * while executing the request - */ - public List getFailures() { - return failures; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); + super(in); int numResponse = in.readVInt(); ImmutableOpenMap.Builder>> storeStatusesBuilder = ImmutableOpenMap.builder(); for (int i = 0; i < numResponse; i++) { @@ -311,7 +282,7 @@ public void readFrom(StreamInput in) throws IOException { int nodeEntries = in.readVInt(); List storeStatuses = new ArrayList<>(nodeEntries); for (int nodeCount = 0; nodeCount < nodeEntries; nodeCount++) { - storeStatuses.add(readStoreStatus(in)); + storeStatuses.add(new StoreStatus(in)); } shardEntries.put(shardID, storeStatuses); } @@ -326,6 +297,22 @@ public void readFrom(StreamInput in) throws IOException { failures = Collections.unmodifiableList(failureBuilder); } + /** + * Returns {@link StoreStatus}s + * grouped by their index names and shard ids. + */ + public ImmutableOpenMap>> getStoreStatuses() { + return storeStatuses; + } + + /** + * Returns node {@link Failure}s encountered + * while executing the request + */ + public List getFailures() { + return failures; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(storeStatuses.size()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java index 3967e5e31f930..5c23753d53bfc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java @@ -24,6 +24,8 @@ import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; +import org.elasticsearch.action.support.nodes.BaseNodesResponse; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -41,8 +43,10 @@ import org.elasticsearch.common.collect.ImmutableOpenIntMap; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.gateway.AsyncShardFetch; +import org.elasticsearch.gateway.AsyncShardFetch.Lister; import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards; import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards.NodeGatewayStartedShards; import org.elasticsearch.index.shard.ShardId; @@ -50,6 +54,7 @@ import 
org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; @@ -65,16 +70,15 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAction { - private final TransportNodesListGatewayStartedShards listShardStoresInfo; + private final NodeClient client; @Inject public TransportIndicesShardStoresAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, - TransportNodesListGatewayStartedShards listShardStoresInfo) { + IndexNameExpressionResolver indexNameExpressionResolver, NodeClient client) { super(IndicesShardStoresAction.NAME, transportService, clusterService, threadPool, actionFilters, IndicesShardStoresRequest::new, indexNameExpressionResolver); - this.listShardStoresInfo = listShardStoresInfo; + this.client = client; } @Override @@ -83,8 +87,8 @@ protected String executor() { } @Override - protected IndicesShardStoresResponse newResponse() { - return new IndicesShardStoresResponse(); + protected IndicesShardStoresResponse read(StreamInput in) throws IOException { + return new IndicesShardStoresResponse(in); } @Override @@ -147,16 +151,26 @@ void start() { if (shardIds.isEmpty()) { listener.onResponse(new IndicesShardStoresResponse()); } else { + // explicitly type lister, some IDEs (Eclipse) are not able to correctly infer the function type Lister<BaseNodesResponse<NodeGatewayStartedShards>, NodeGatewayStartedShards> lister = this::listStartedShards; for (ShardId shardId : shardIds) { - InternalAsyncFetch fetch = new InternalAsyncFetch(logger, "shard_stores", shardId, listShardStoresInfo); + InternalAsyncFetch fetch = new InternalAsyncFetch(logger, "shard_stores", shardId, lister); fetch.fetchData(nodes, Collections.emptySet()); } } } + private void listStartedShards(ShardId shardId, DiscoveryNode[] nodes, + ActionListener<BaseNodesResponse<NodeGatewayStartedShards>> listener) { + var request = new TransportNodesListGatewayStartedShards.Request(shardId, nodes); + client.executeLocally(TransportNodesListGatewayStartedShards.TYPE, request, + ActionListener.wrap(listener::onResponse, listener::onFailure)); + } + private class InternalAsyncFetch extends AsyncShardFetch { - InternalAsyncFetch(Logger logger, String type, ShardId shardId, TransportNodesListGatewayStartedShards action) { + InternalAsyncFetch(Logger logger, String type, ShardId shardId, + Lister<BaseNodesResponse<NodeGatewayStartedShards>, NodeGatewayStartedShards> action) { super(logger, type, shardId, action); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeAction.java index 28b650cf0b747..bdf8b7e0546cc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.shrink; import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable; public class ResizeAction extends ActionType { @@ -28,11 +27,7 @@ public class ResizeAction extends ActionType { public static final String NAME = "indices:admin/resize"; private ResizeAction() { - super(NAME); + super(NAME, ResizeResponse::new); } - @Override - public Writeable.Reader getResponseReader() { - return ResizeResponse::new; - } } diff --git
a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java index c81b9d73658ab..a42ed270f84f6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java @@ -57,6 +57,14 @@ public class ResizeRequest extends AcknowledgedRequest implements private ResizeType type = ResizeType.SHRINK; private Boolean copySettings = true; + public ResizeRequest(StreamInput in) throws IOException { + super(in); + targetIndexRequest = new CreateIndexRequest(in); + sourceIndex = in.readString(); + type = in.readEnum(ResizeType.class); + copySettings = in.readOptionalBoolean(); + } + ResizeRequest() {} public ResizeRequest(String targetIndex, String sourceIndex) { @@ -87,16 +95,6 @@ public void setSourceIndex(String index) { this.sourceIndex = index; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - targetIndexRequest = new CreateIndexRequest(); - targetIndexRequest.readFrom(in); - sourceIndex = in.readString(); - type = in.readEnum(ResizeType.class); - copySettings = in.readOptionalBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkAction.java index 57de49307018c..1881f969c445b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.shrink; import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable; public class ShrinkAction extends ActionType { @@ -28,11 +27,7 @@ public class ShrinkAction extends ActionType { public static final String NAME = "indices:admin/shrink"; private ShrinkAction() { - super(NAME); + super(NAME, ResizeResponse::new); } - @Override - public Writeable.Reader getResponseReader() { - return ResizeResponse::new; - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java index 178c15e473bd4..bbbd366ba1b5a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java @@ -72,8 +72,7 @@ public TransportResizeAction(TransportService transportService, ClusterService c protected TransportResizeAction(String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, MetaDataCreateIndexService createIndexService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Client client) { - super(actionName, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, - ResizeRequest::new); + super(actionName, transportService, clusterService, threadPool, actionFilters, ResizeRequest::new, indexNameExpressionResolver); this.createIndexService = createIndexService; this.client = client; } @@ -85,11 +84,6 @@ protected String executor() { return ThreadPool.Names.SAME; } - 
@Override - protected ResizeResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected ResizeResponse read(StreamInput in) throws IOException { return new ResizeResponse(in); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java index b604cd41c05cf..2f8423916b3c2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java @@ -248,22 +248,22 @@ public CommonStats(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeOptionalStreamable(docs); - out.writeOptionalStreamable(store); - out.writeOptionalStreamable(indexing); - out.writeOptionalStreamable(get); + out.writeOptionalWriteable(docs); + out.writeOptionalWriteable(store); + out.writeOptionalWriteable(indexing); + out.writeOptionalWriteable(get); out.writeOptionalWriteable(search); - out.writeOptionalStreamable(merge); - out.writeOptionalStreamable(refresh); - out.writeOptionalStreamable(flush); - out.writeOptionalStreamable(warmer); - out.writeOptionalStreamable(queryCache); - out.writeOptionalStreamable(fieldData); - out.writeOptionalStreamable(completion); - out.writeOptionalStreamable(segments); - out.writeOptionalStreamable(translog); - out.writeOptionalStreamable(requestCache); - out.writeOptionalStreamable(recoveryStats); + out.writeOptionalWriteable(merge); + out.writeOptionalWriteable(refresh); + out.writeOptionalWriteable(flush); + out.writeOptionalWriteable(warmer); + out.writeOptionalWriteable(queryCache); + out.writeOptionalWriteable(fieldData); + out.writeOptionalWriteable(completion); + out.writeOptionalWriteable(segments); + out.writeOptionalWriteable(translog); + out.writeOptionalWriteable(requestCache); + out.writeOptionalWriteable(recoveryStats); } public void add(CommonStats stats) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexShardStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexShardStats.java index 6c1de5b2992c4..e6a3b43093cef 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexShardStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexShardStats.java @@ -21,20 +21,27 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; import java.util.Arrays; import java.util.Iterator; -public class IndexShardStats implements Iterable, Streamable { +public class IndexShardStats implements Iterable, Writeable { private ShardId shardId; private ShardStats[] shards; - private IndexShardStats() {} + public IndexShardStats(StreamInput in) throws IOException { + shardId = new ShardId(in); + int shardSize = in.readVInt(); + shards = new ShardStats[shardSize]; + for (int i = 0; i < shardSize; i++) { + shards[i] = new ShardStats(in); + } + } public IndexShardStats(ShardId shardId, ShardStats[] shards) { this.shardId = shardId; @@ -88,16 +95,6 @@ public CommonStats getPrimary() { return stats; } - @Override - public void readFrom(StreamInput in) throws IOException { 
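    // Illustrative commentary from the editor, not part of the patch: the CommonStats hunk above
    // swaps writeOptionalStreamable for writeOptionalWriteable. Both encode a presence boolean
    // followed by the payload, so the bytes on the wire do not change; only the read side moves
    // from a no-arg instance plus readFrom() to a reader reference, for example (same pattern as
    // the surrounding hunks):
    //     docs = in.readOptionalWriteable(DocsStats::new);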
- shardId = new ShardId(in); - int shardSize = in.readVInt(); - shards = new ShardStats[shardSize]; - for (int i = 0; i < shardSize; i++) { - shards[i] = ShardStats.readShardStats(in); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { shardId.writeTo(out); @@ -106,11 +103,4 @@ public void writeTo(StreamOutput out) throws IOException { stats.writeTo(out); } } - - public static IndexShardStats readIndexShardStats(StreamInput in) throws IOException { - IndexShardStats indexShardStats = new IndexShardStats(); - indexShardStats.readFrom(in); - return indexShardStats; - } - } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsAction.java index 4d2717d6a006f..caa915ba604f7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsAction.java @@ -19,19 +19,14 @@ package org.elasticsearch.action.admin.indices.stats; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class IndicesStatsAction extends StreamableResponseActionType { +public class IndicesStatsAction extends ActionType { public static final IndicesStatsAction INSTANCE = new IndicesStatsAction(); public static final String NAME = "indices:monitor/stats"; private IndicesStatsAction() { - super(NAME); - } - - @Override - public IndicesStatsResponse newResponse() { - return new IndicesStatsResponse(); + super(NAME, IndicesStatsResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequest.java index b162a23258591..345939900b822 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequest.java @@ -38,6 +38,15 @@ public class IndicesStatsRequest extends BroadcastRequest { private CommonStatsFlags flags = new CommonStatsFlags(); + public IndicesStatsRequest() { + super((String[])null); + } + + public IndicesStatsRequest(StreamInput in) throws IOException { + super(in); + flags = new CommonStatsFlags(in); + } + /** * Sets all flags to return all stats. 
*/ @@ -281,10 +290,4 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); flags.writeTo(out); } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - flags = new CommonStatsFlags(in); - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java index 0540bc3ad5cc1..1dd093b8485ac 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java @@ -43,8 +43,9 @@ public class IndicesStatsResponse extends BroadcastResponse { private Map shardStatsMap; - IndicesStatsResponse() { - + IndicesStatsResponse(StreamInput in) throws IOException { + super(in); + shards = in.readArray(ShardStats::new, (size) -> new ShardStats[size]); } IndicesStatsResponse(ShardStats[] shards, int totalShards, int successfulShards, int failedShards, @@ -126,12 +127,6 @@ public CommonStats getPrimaries() { return stats; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - shards = in.readArray(ShardStats::readShardStats, (size) -> new ShardStats[size]); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java index 76c18654321c7..8fd04f092b5c9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java @@ -23,7 +23,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -34,7 +33,7 @@ import java.io.IOException; -public class ShardStats implements Streamable, Writeable, ToXContentFragment { +public class ShardStats implements Writeable, ToXContentFragment { private ShardRouting shardRouting; private CommonStats commonStats; @@ -59,7 +58,15 @@ public RetentionLeaseStats getRetentionLeaseStats() { private String statePath; private boolean isCustomDataPath; - ShardStats() { + public ShardStats(StreamInput in) throws IOException { + shardRouting = new ShardRouting(in); + commonStats = new CommonStats(in); + commitStats = CommitStats.readOptionalCommitStatsFrom(in); + statePath = in.readString(); + dataPath = in.readString(); + isCustomDataPath = in.readBoolean(); + seqNoStats = in.readOptionalWriteable(SeqNoStats::new); + retentionLeaseStats = in.readOptionalWriteable(RetentionLeaseStats::new); } public ShardStats( @@ -112,29 +119,11 @@ public boolean isCustomDataPath() { return isCustomDataPath; } - public static ShardStats readShardStats(StreamInput in) throws IOException { - ShardStats stats = new ShardStats(); - stats.readFrom(in); - return stats; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - shardRouting = new ShardRouting(in); - commonStats = new CommonStats(in); - commitStats = CommitStats.readOptionalCommitStatsFrom(in); - statePath 
= in.readString(); - dataPath = in.readString(); - isCustomDataPath = in.readBoolean(); - seqNoStats = in.readOptionalWriteable(SeqNoStats::new); - retentionLeaseStats = in.readOptionalWriteable(RetentionLeaseStats::new); - } - @Override public void writeTo(StreamOutput out) throws IOException { shardRouting.writeTo(out); commonStats.writeTo(out); - out.writeOptionalStreamable(commitStats); + out.writeOptionalWriteable(commitStats); out.writeString(statePath); out.writeString(dataPath); out.writeBoolean(isCustomDataPath); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java index 8371023738b3b..400358f59afeb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java @@ -77,7 +77,7 @@ protected ClusterBlockException checkRequestBlock(ClusterState state, IndicesSta @Override protected ShardStats readShardResult(StreamInput in) throws IOException { - return ShardStats.readShardStats(in); + return new ShardStats(in); } @Override @@ -90,9 +90,7 @@ protected IndicesStatsResponse newResponse(IndicesStatsRequest request, int tota @Override protected IndicesStatsRequest readRequestFrom(StreamInput in) throws IOException { - IndicesStatsRequest request = new IndicesStatsRequest(); - request.readFrom(in); - return request; + return new IndicesStatsRequest(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java index b08d2886b495e..9c1d6b30aa4cf 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java @@ -21,7 +21,6 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.common.io.stream.Writeable; public class DeleteIndexTemplateAction extends ActionType { @@ -29,11 +28,7 @@ public class DeleteIndexTemplateAction extends ActionType public static final String NAME = "indices:admin/template/delete"; private DeleteIndexTemplateAction() { - super(NAME); + super(NAME, AcknowledgedResponse::new); } - @Override - public Writeable.Reader getResponseReader() { - return AcknowledgedResponse::new; - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequest.java index f33d35b66a570..9ee4fdd6f4365 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequest.java @@ -34,6 +34,11 @@ public class DeleteIndexTemplateRequest extends MasterNodeRequest { +public class GetIndexTemplatesAction extends ActionType { public static final GetIndexTemplatesAction INSTANCE = new GetIndexTemplatesAction(); public static final String NAME = "indices:admin/template/get"; protected GetIndexTemplatesAction() { - super(NAME); - } -
@Override - public GetIndexTemplatesResponse newResponse() { - return new GetIndexTemplatesResponse(); + super(NAME, GetIndexTemplatesResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java index cfaa9408da1e5..3a61ae6b91d57 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java @@ -82,9 +82,4 @@ public GetIndexTemplatesRequest names(String... names) { public String[] names() { return this.names; } - - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java index fd776cafcff6f..502f073c0a38d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java @@ -39,8 +39,13 @@ public class GetIndexTemplatesResponse extends ActionResponse implements ToXCont private final List indexTemplates; - GetIndexTemplatesResponse() { + public GetIndexTemplatesResponse(StreamInput in) throws IOException { + super(in); + int size = in.readVInt(); indexTemplates = new ArrayList<>(); + for (int i = 0 ; i < size ; i++) { + indexTemplates.add(0, IndexTemplateMetaData.readFrom(in)); + } } public GetIndexTemplatesResponse(List indexTemplates) { @@ -51,16 +56,6 @@ public List getIndexTemplates() { return indexTemplates; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - int size = in.readVInt(); - indexTemplates.clear(); - for (int i = 0 ; i < size ; i++) { - indexTemplates.add(0, IndexTemplateMetaData.readFrom(in)); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(indexTemplates.size()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java index 1d733667fc3d5..ecb3470c1eb33 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java @@ -29,16 +29,19 @@ import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; -public class TransportGetIndexTemplatesAction extends TransportMasterNodeReadAction { +public class TransportGetIndexTemplatesAction extends + 
TransportMasterNodeReadAction { @Inject public TransportGetIndexTemplatesAction(TransportService transportService, ClusterService clusterService, @@ -54,13 +57,13 @@ protected String executor() { } @Override - protected ClusterBlockException checkBlock(GetIndexTemplatesRequest request, ClusterState state) { - return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + protected GetIndexTemplatesResponse read(StreamInput in) throws IOException { + return new GetIndexTemplatesResponse(in); } @Override - protected GetIndexTemplatesResponse newResponse() { - return new GetIndexTemplatesResponse(); + protected ClusterBlockException checkBlock(GetIndexTemplatesRequest request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateAction.java index 2e57d252deab4..0bd939b2cba82 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateAction.java @@ -21,7 +21,6 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.common.io.stream.Writeable; public class PutIndexTemplateAction extends ActionType { @@ -29,11 +28,7 @@ public class PutIndexTemplateAction extends ActionType { public static final String NAME = "indices:admin/template/put"; private PutIndexTemplateAction() { - super(NAME); + super(NAME, AcknowledgedResponse::new); } - @Override - public Writeable.Reader getResponseReader() { - return AcknowledgedResponse::new; - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java index a23fac0ac12b3..511e2fea59090 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java @@ -88,6 +88,27 @@ public class PutIndexTemplateRequest extends MasterNodeRequest { +public class UpgradeStatusAction extends ActionType { public static final UpgradeStatusAction INSTANCE = new UpgradeStatusAction(); public static final String NAME = "indices:monitor/upgrade"; private UpgradeStatusAction() { - super(NAME); - } - - @Override - public UpgradeStatusResponse newResponse() { - return new UpgradeStatusResponse(); + super(NAME, UpgradeStatusResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusRequest.java index de4ea72c66365..c42a9c11860c2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusRequest.java @@ -21,6 +21,9 @@ import org.elasticsearch.action.support.broadcast.BroadcastRequest; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; public class UpgradeStatusRequest extends BroadcastRequest { 
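Taken together, the hunks above and below apply one recurring convention of this change: a transport class stops implementing Streamable, whose mutable fields were filled in by an overridable readFrom(StreamInput), and instead deserializes itself in a constructor that takes a StreamInput, the exact shape expected by the Writeable.Reader functional interface. A minimal sketch of that convention, using a hypothetical ExampleResponse whose name and fields are illustrative only and not part of this change:

import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;

public class ExampleResponse extends ActionResponse {

    // Fields can become final: they are assigned exactly once, in the
    // StreamInput constructor, rather than mutated by readFrom(in).
    private final String name;
    private final int shardCount;

    // Replaces the old no-arg constructor plus readFrom(StreamInput);
    // reads must stay in the same order that writeTo writes.
    public ExampleResponse(StreamInput in) throws IOException {
        super(in);
        name = in.readString();
        shardCount = in.readVInt();
    }

    public ExampleResponse(String name, int shardCount) {
        this.name = name;
        this.shardCount = shardCount;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(name);
        out.writeVInt(shardCount);
    }
}

Because ExampleResponse::new then satisfies Writeable.Reader, the factory methods on transport actions collapse to expressions such as return new ShardStats(in); in TransportIndicesStatsAction above.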
@@ -28,6 +31,10 @@ public UpgradeStatusRequest() { this(Strings.EMPTY_ARRAY); } + public UpgradeStatusRequest(StreamInput in) throws IOException { + super(in); + } + public UpgradeStatusRequest(String... indices) { super(indices); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java index 76a85a2416374..cce1c5a673527 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java @@ -39,7 +39,12 @@ public class UpgradeStatusResponse extends BroadcastResponse { private Map indicesUpgradeStatus; - UpgradeStatusResponse() { + UpgradeStatusResponse(StreamInput in) throws IOException { + super(in); + shards = new ShardUpgradeStatus[in.readVInt()]; + for (int i = 0; i < shards.length; i++) { + shards[i] = new ShardUpgradeStatus(in); + } } UpgradeStatusResponse(ShardUpgradeStatus[] shards, int totalShards, int successfulShards, int failedShards, @@ -72,15 +77,6 @@ public Map getIndices() { return indicesUpgradeStats; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - shards = new ShardUpgradeStatus[in.readVInt()]; - for (int i = 0; i < shards.length; i++) { - shards[i] = ShardUpgradeStatus.readShardUpgradeStatus(in); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/ShardUpgradeResult.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/ShardUpgradeResult.java index 7b57c9680bd16..14dae446d1f0e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/ShardUpgradeResult.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/ShardUpgradeResult.java @@ -22,13 +22,13 @@ import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; import java.text.ParseException; -class ShardUpgradeResult implements Streamable { +class ShardUpgradeResult implements Writeable { private ShardId shardId; @@ -38,10 +38,6 @@ class ShardUpgradeResult implements Streamable { private boolean primary; - - ShardUpgradeResult() { - } - ShardUpgradeResult(ShardId shardId, boolean primary, Version upgradeVersion, org.apache.lucene.util.Version oldestLuceneSegment) { this.shardId = shardId; this.primary = primary; @@ -49,6 +45,17 @@ class ShardUpgradeResult implements Streamable { this.oldestLuceneSegment = oldestLuceneSegment; } + ShardUpgradeResult(StreamInput in) throws IOException { + shardId = new ShardId(in); + primary = in.readBoolean(); + upgradeVersion = Version.readVersion(in); + try { + oldestLuceneSegment = org.apache.lucene.util.Version.parse(in.readString()); + } catch (ParseException ex) { + throw new IOException("failed to parse lucene version [" + oldestLuceneSegment + "]", ex); + } + } + public ShardId getShardId() { return shardId; } @@ -65,20 +72,6 @@ public boolean primary() { return primary; } - - @Override - public void readFrom(StreamInput in) throws IOException { - shardId = 
new ShardId(in); - primary = in.readBoolean(); - upgradeVersion = Version.readVersion(in); - try { - oldestLuceneSegment = org.apache.lucene.util.Version.parse(in.readString()); - } catch (ParseException ex) { - throw new IOException("failed to parse lucene version [" + oldestLuceneSegment + "]", ex); - } - - } - @Override public void writeTo(StreamOutput out) throws IOException { shardId.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java index b122350c3e61d..3bd28cfc6e2cc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java @@ -130,16 +130,12 @@ protected ShardUpgradeResult shardOperation(UpgradeRequest request, ShardRouting @Override protected ShardUpgradeResult readShardResult(StreamInput in) throws IOException { - ShardUpgradeResult result = new ShardUpgradeResult(); - result.readFrom(in); - return result; + return new ShardUpgradeResult(in); } @Override protected UpgradeRequest readRequestFrom(StreamInput in) throws IOException { - UpgradeRequest request = new UpgradeRequest(); - request.readFrom(in); - return request; + return new UpgradeRequest(in); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java index a747b6f058bc6..a3283a1e33868 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java @@ -48,7 +48,7 @@ public TransportUpgradeSettingsAction(TransportService transportService, Cluster ThreadPool threadPool, MetaDataUpdateSettingsService updateSettingsService, IndexNameExpressionResolver indexNameExpressionResolver, ActionFilters actionFilters) { super(UpgradeSettingsAction.NAME, transportService, clusterService, threadPool, actionFilters, - indexNameExpressionResolver, UpgradeSettingsRequest::new); + UpgradeSettingsRequest::new, indexNameExpressionResolver); this.updateSettingsService = updateSettingsService; } @@ -68,11 +68,6 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { return new AcknowledgedResponse(in); } - @Override - protected AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected void masterOperation(Task task, final UpgradeSettingsRequest request, final ClusterState state, final ActionListener listener) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeAction.java index 3a3344d1a2edb..22ce9a3c0cb3b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeAction.java @@ -19,22 +19,17 @@ package org.elasticsearch.action.admin.indices.upgrade.post; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * Upgrade 
index/indices action. */ -public class UpgradeAction extends StreamableResponseActionType { +public class UpgradeAction extends ActionType { public static final UpgradeAction INSTANCE = new UpgradeAction(); public static final String NAME = "indices:admin/upgrade"; private UpgradeAction() { - super(NAME); - } - - @Override - public UpgradeResponse newResponse() { - return new UpgradeResponse(); + super(NAME, UpgradeResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeRequest.java index 6a0288b78d4b0..1cb14ca6fd47c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeRequest.java @@ -49,13 +49,8 @@ public UpgradeRequest(String... indices) { super(indices); } - public UpgradeRequest() { - - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); + public UpgradeRequest(StreamInput in) throws IOException { + super(in); upgradeOnlyAncientSegments = in.readBoolean(); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeResponse.java index 4a760e273a0fa..9dacda2f16ffa 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeResponse.java @@ -41,19 +41,8 @@ public class UpgradeResponse extends BroadcastResponse { private Map> versions; - UpgradeResponse() { - - } - - UpgradeResponse(Map> versions, int totalShards, int successfulShards, int failedShards, - List shardFailures) { - super(totalShards, successfulShards, failedShards, shardFailures); - this.versions = versions; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); + UpgradeResponse(StreamInput in) throws IOException { + super(in); int size = in.readVInt(); versions = new HashMap<>(); for (int i=0; i<size; i++) { String index = in.readString(); Version upgradeVersion = Version.readVersion(in); String oldestLuceneSegment = in.readString(); versions.put(index, new Tuple<>(upgradeVersion, oldestLuceneSegment)); } } + UpgradeResponse(Map<String, Tuple<Version, String>> versions, int totalShards, int successfulShards, int failedShards, + List shardFailures) { + super(totalShards, successfulShards, failedShards, shardFailures); + this.versions = versions; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsAction.java index 3876e7be88225..15ab022214640 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsAction.java @@ -21,7 +21,6 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.common.io.stream.Writeable; public class UpgradeSettingsAction extends ActionType { @@ -29,11 +28,7 @@ public class UpgradeSettingsAction extends ActionType { public static final String NAME = "internal:indices/admin/upgrade"; private UpgradeSettingsAction() { - super(NAME); + super(NAME, AcknowledgedResponse::new); } - @Override - public Writeable.Reader getResponseReader() { - return
AcknowledgedResponse::new; - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsRequest.java index e1e4a5fde8dd0..cce8ef7185e16 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsRequest.java @@ -39,6 +39,18 @@ public class UpgradeSettingsRequest extends AcknowledgedRequest<UpgradeSettingsRequest> { private Map<String, Tuple<Version, String>> versions; + public UpgradeSettingsRequest(StreamInput in) throws IOException { + super(in); + int size = in.readVInt(); + versions = new HashMap<>(); + for (int i=0; i<size; i++) { + String index = in.readString(); + Version upgradeVersion = Version.readVersion(in); + String oldestLuceneSegment = in.readString(); + versions.put(index, new Tuple<>(upgradeVersion, oldestLuceneSegment)); + } + } + public UpgradeSettingsRequest() { } @@ -74,20 +86,6 @@ public UpgradeSettingsRequest versions(Map> versi return this; } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - int size = in.readVInt(); - versions = new HashMap<>(); - for (int i=0; i<size; i++) { - String index = in.readString(); - Version upgradeVersion = Version.readVersion(in); - String oldestLuceneSegment = in.readString(); - versions.put(index, new Tuple<>(upgradeVersion, oldestLuceneSegment)); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java index 3c796a061a90a..61745d8c10602 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java @@ -22,7 +22,7 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -34,7 +34,7 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; -public class QueryExplanation implements Streamable, ToXContentFragment { +public class QueryExplanation implements Writeable, ToXContentFragment { public static final String INDEX_FIELD = "index"; public static final String SHARD_FIELD = "shard"; @@ -79,8 +79,12 @@ public class QueryExplanation implements Streamable, ToXContentFragment { private String error; - QueryExplanation() { - + public QueryExplanation(StreamInput in) throws IOException { + index = in.readOptionalString(); + shard = in.readInt(); + valid = in.readBoolean(); + explanation = in.readOptionalString(); + error = in.readOptionalString(); } public QueryExplanation(String index, int shard, boolean valid, String explanation, @@ -112,15 +116,6 @@ public String getExplanation() { return this.explanation; } - @Override - public void readFrom(StreamInput in) throws IOException { - index = in.readOptionalString(); - shard = in.readInt(); - valid = in.readBoolean(); - explanation = in.readOptionalString(); - error = in.readOptionalString(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(index); @@ -130,12 +125,6 @@ public void writeTo(StreamOutput out)
throws IOException { out.writeOptionalString(error); } - public static QueryExplanation readQueryExplanation(StreamInput in) throws IOException { - QueryExplanation exp = new QueryExplanation(); - exp.readFrom(in); - return exp; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { if (getIndex() != null) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java index 2ccf2f1bd3e21..050bb7b5d8ba1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java @@ -42,7 +42,21 @@ public class ShardValidateQueryRequest extends BroadcastShardRequest { private long nowInMillis; private AliasFilter filteringAliases; - public ShardValidateQueryRequest() { + public ShardValidateQueryRequest(StreamInput in) throws IOException { + super(in); + query = in.readNamedWriteable(QueryBuilder.class); + + int typesSize = in.readVInt(); + if (typesSize > 0) { + types = new String[typesSize]; + for (int i = 0; i < typesSize; i++) { + types[i] = in.readString(); + } + } + filteringAliases = new AliasFilter(in); + explain = in.readBoolean(); + rewrite = in.readBoolean(); + nowInMillis = in.readVLong(); } public ShardValidateQueryRequest(ShardId shardId, AliasFilter filteringAliases, ValidateQueryRequest request) { @@ -79,24 +93,6 @@ public long nowInMillis() { return this.nowInMillis; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - query = in.readNamedWriteable(QueryBuilder.class); - - int typesSize = in.readVInt(); - if (typesSize > 0) { - types = new String[typesSize]; - for (int i = 0; i < typesSize; i++) { - types[i] = in.readString(); - } - } - filteringAliases = new AliasFilter(in); - explain = in.readBoolean(); - rewrite = in.readBoolean(); - nowInMillis = in.readVLong(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryResponse.java index 43d3ad823053d..edd8e82cd66b6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryResponse.java @@ -39,8 +39,11 @@ class ShardValidateQueryResponse extends BroadcastShardResponse { private String error; - ShardValidateQueryResponse() { - + ShardValidateQueryResponse(StreamInput in) throws IOException { + super(in); + valid = in.readBoolean(); + explanation = in.readOptionalString(); + error = in.readOptionalString(); } ShardValidateQueryResponse(ShardId shardId, boolean valid, String explanation, String error) { @@ -62,14 +65,6 @@ public String getError() { return error; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - valid = in.readBoolean(); - explanation = in.readOptionalString(); - error = in.readOptionalString(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java index 8f85e91d29ee2..498f2d6e7b29c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.query.ParsedQuery; @@ -122,8 +123,8 @@ protected ShardValidateQueryRequest newShardRequest(int numShards, ShardRouting } @Override - protected ShardValidateQueryResponse newShardResponse() { - return new ShardValidateQueryResponse(); + protected ShardValidateQueryResponse readShardResponse(StreamInput in) throws IOException { + return new ShardValidateQueryResponse(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryAction.java index 713f94003642a..07421d17931e1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryAction.java @@ -19,19 +19,14 @@ package org.elasticsearch.action.admin.indices.validate.query; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class ValidateQueryAction extends StreamableResponseActionType { +public class ValidateQueryAction extends ActionType { public static final ValidateQueryAction INSTANCE = new ValidateQueryAction(); public static final String NAME = "indices:admin/validate/query"; private ValidateQueryAction() { - super(NAME); - } - - @Override - public ValidateQueryResponse newResponse() { - return new ValidateQueryResponse(); + super(NAME, ValidateQueryResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java index 4f993e40d0b8b..06965beb44df2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java @@ -55,6 +55,21 @@ public ValidateQueryRequest() { this(Strings.EMPTY_ARRAY); } + public ValidateQueryRequest(StreamInput in) throws IOException { + super(in); + query = in.readNamedWriteable(QueryBuilder.class); + int typesSize = in.readVInt(); + if (typesSize > 0) { + types = new String[typesSize]; + for (int i = 0; i < typesSize; i++) { + types[i] = in.readString(); + } + } + explain = in.readBoolean(); + rewrite = in.readBoolean(); + allShards = in.readBoolean(); + } + /** * Constructs a new validate request against the provided indices. No indices provided means it will * run against all indices. 
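The ActionType conversions repeated through these files are the companion convention: instead of subclassing StreamableResponseActionType and overriding newResponse(), or overriding getResponseReader(), an action now hands its response reader straight to the ActionType constructor. A sketch with a hypothetical ExampleAction, reusing the hypothetical ExampleResponse sketched earlier (both names are illustrative only):

import org.elasticsearch.action.ActionType;

public class ExampleAction extends ActionType<ExampleResponse> {

    public static final ExampleAction INSTANCE = new ExampleAction();
    public static final String NAME = "indices:data/read/example";

    private ExampleAction() {
        // The Writeable.Reader is registered up front, so the Streamable-era
        // newResponse()/getResponseReader() overrides disappear entirely.
        super(NAME, ExampleResponse::new);
    }
}

The method reference only compiles once the response class exposes a StreamInput constructor, which is why every response type touched by this change gains one.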
@@ -150,22 +165,6 @@ public boolean allShards() { return allShards; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - query = in.readNamedWriteable(QueryBuilder.class); - int typesSize = in.readVInt(); - if (typesSize > 0) { - types = new String[typesSize]; - for (int i = 0; i < typesSize; i++) { - types[i] = in.readString(); - } - } - explain = in.readBoolean(); - rewrite = in.readBoolean(); - allShards = in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java index f766e1d9c6aa4..2489011fd3d73 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java @@ -34,7 +34,6 @@ import java.util.Collections; import java.util.List; -import static org.elasticsearch.action.admin.indices.validate.query.QueryExplanation.readQueryExplanation; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; @@ -77,8 +76,16 @@ public class ValidateQueryResponse extends BroadcastResponse { private List queryExplanations; - ValidateQueryResponse() { - + ValidateQueryResponse(StreamInput in) throws IOException { + super(in); + valid = in.readBoolean(); + int size = in.readVInt(); + if (size > 0) { + queryExplanations = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + queryExplanations.add(new QueryExplanation(in)); + } + } } ValidateQueryResponse(boolean valid, List queryExplanations, int totalShards, int successfulShards, int failedShards, @@ -108,19 +115,6 @@ public List getQueryExplanation() { return queryExplanations; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - valid = in.readBoolean(); - int size = in.readVInt(); - if (size > 0) { - queryExplanations = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - queryExplanations.add(readQueryExplanation(in)); - } - } - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkAction.java index bd4ac7ddf4788..009fb0b6fbe28 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkAction.java @@ -19,22 +19,17 @@ package org.elasticsearch.action.bulk; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.transport.TransportRequestOptions; -public class BulkAction extends StreamableResponseActionType { +public class BulkAction extends ActionType { public static final BulkAction INSTANCE = new BulkAction(); public static final String NAME = "indices:data/write/bulk"; private BulkAction() { - super(NAME); - } - - @Override - public BulkResponse newResponse() { - return new BulkResponse(); + super(NAME, BulkResponse::new); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java 
b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java index 434f87de121ed..e1306a437ca98 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java @@ -23,19 +23,23 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import java.io.IOException; import java.util.Objects; -public class BulkItemRequest implements Streamable { +public class BulkItemRequest implements Writeable { private int id; private DocWriteRequest request; private volatile BulkItemResponse primaryResponse; - BulkItemRequest() { - + BulkItemRequest(StreamInput in) throws IOException { + id = in.readVInt(); + request = DocWriteRequest.readDocumentRequest(in); + if (in.readBoolean()) { + primaryResponse = new BulkItemResponse(in); + } } // NOTE: public for testing only @@ -89,25 +93,10 @@ public void abort(String index, Exception cause) { } } - public static BulkItemRequest readBulkItem(StreamInput in) throws IOException { - BulkItemRequest item = new BulkItemRequest(); - item.readFrom(in); - return item; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - id = in.readVInt(); - request = DocWriteRequest.readDocumentRequest(in); - if (in.readBoolean()) { - primaryResponse = BulkItemResponse.readBulkItem(in); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(id); DocWriteRequest.writeDocumentRequest(out, request); - out.writeOptionalStreamable(primaryResponse); + out.writeOptionalWriteable(primaryResponse); } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index dff7479d46219..396d59c71c3ae 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -31,7 +31,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.StatusToXContentObject; @@ -52,7 +51,7 @@ * Represents a single item response for an action executed as part of the bulk API. Holds the index/type/id * of the relevant action, and if it has failed or not (with the failure message in case it failed).
*/ -public class BulkItemResponse implements Streamable, StatusToXContentObject { +public class BulkItemResponse implements Writeable, StatusToXContentObject { private static final String _INDEX = "_index"; private static final String _TYPE = "_type"; @@ -343,8 +342,24 @@ public String toString() { private Failure failure; - BulkItemResponse() { + BulkItemResponse() {} + BulkItemResponse(StreamInput in) throws IOException { + id = in.readVInt(); + opType = OpType.fromId(in.readByte()); + + byte type = in.readByte(); + if (type == 0) { + response = new IndexResponse(in); + } else if (type == 1) { + response = new DeleteResponse(in); + } else if (type == 3) { // make 3 instead of 2, because 2 is already in use for 'no responses' + response = new UpdateResponse(in); + } + + if (in.readBoolean()) { + failure = new Failure(in); + } } public BulkItemResponse(int id, OpType opType, DocWriteResponse response) { @@ -445,35 +460,6 @@ public Failure getFailure() { return this.failure; } - public static BulkItemResponse readBulkItem(StreamInput in) throws IOException { - BulkItemResponse response = new BulkItemResponse(); - response.readFrom(in); - return response; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - id = in.readVInt(); - opType = OpType.fromId(in.readByte()); - - byte type = in.readByte(); - if (type == 0) { - response = new IndexResponse(); - response.readFrom(in); - } else if (type == 1) { - response = new DeleteResponse(); - response.readFrom(in); - - } else if (type == 3) { // make 3 instead of 2, because 2 is already in use for 'no responses' - response = new UpdateResponse(); - response.readFrom(in); - } - - if (in.readBoolean()) { - failure = new Failure(in); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(id); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index 2b935f0f0fc47..c1a4014b94b93 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -78,7 +78,17 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques private long sizeInBytes = 0; - public BulkRequest() { + public BulkRequest() {} + + public BulkRequest(StreamInput in) throws IOException { + super(in); + waitForActiveShards = ActiveShardCount.readFrom(in); + int size = in.readVInt(); + for (int i = 0; i < size; i++) { + requests.add(DocWriteRequest.readDocumentRequest(in)); + } + refreshPolicy = RefreshPolicy.readFrom(in); + timeout = in.readTimeValue(); } public BulkRequest(@Nullable String globalIndex) { @@ -389,18 +399,6 @@ public ActionRequestValidationException validate() { return validationException; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - waitForActiveShards = ActiveShardCount.readFrom(in); - int size = in.readVInt(); - for (int i = 0; i < size; i++) { - requests.add(DocWriteRequest.readDocumentRequest(in)); - } - refreshPolicy = RefreshPolicy.readFrom(in); - timeout = in.readTimeValue(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java index 6713db9869243..c021825bc7a8a 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java +++ 
b/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java @@ -56,7 +56,16 @@ public class BulkResponse extends ActionResponse implements Iterable<BulkItemResponse> { private BulkItemResponse[] responses; private long tookInMillis; private long ingestTookInMillis; + public BulkResponse(StreamInput in) throws IOException { + super(in); + responses = new BulkItemResponse[in.readVInt()]; + for (int i = 0; i < responses.length; i++) { + responses[i] = new BulkItemResponse(in); + } + tookInMillis = in.readVLong(); + ingestTookInMillis = in.readZLong(); + } @Override public Iterator<BulkItemResponse> iterator() { return Arrays.stream(responses).iterator(); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - responses = new BulkItemResponse[in.readVInt()]; - for (int i = 0; i < responses.length; i++) { - responses[i] = BulkItemResponse.readBulkItem(in); - } - tookInMillis = in.readVLong(); - ingestTookInMillis = in.readZLong(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(responses.length); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java index 15196973f9e37..29e00c20446bb 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java @@ -38,7 +38,7 @@ public BulkShardRequest(StreamInput in) throws IOException { items = new BulkItemRequest[in.readVInt()]; for (int i = 0; i < items.length; i++) { if (in.readBoolean()) { - items[i] = BulkItemRequest.readBulkItem(in); + items[i] = new BulkItemRequest(in); } } } @@ -84,11 +84,6 @@ public void writeTo(StreamOutput out) throws IOException { } } - @Override - public void readFrom(StreamInput in) { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public String toString() { // This is included in error messages so we'll try to make it somewhat user friendly. diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java index fc58e620738da..63ab78547d651 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java @@ -30,10 +30,16 @@ public class BulkShardResponse extends ReplicationResponse implements WriteResponse { - private ShardId shardId; - private BulkItemResponse[] responses; + private final ShardId shardId; + private final BulkItemResponse[] responses; - BulkShardResponse() { + BulkShardResponse(StreamInput in) throws IOException { + super(in); + shardId = new ShardId(in); + responses = new BulkItemResponse[in.readVInt()]; + for (int i = 0; i < responses.length; i++) { + responses[i] = new BulkItemResponse(in); + } } // NOTE: public for testing only @@ -64,16 +70,6 @@ public void setForcedRefresh(boolean forcedRefresh) { } } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - shardId = new ShardId(in); - responses = new BulkItemResponse[in.readVInt()]; - for (int i = 0; i < responses.length; i++) { - responses[i] = BulkItemResponse.readBulkItem(in); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index b0efa35819acb..7a18dfc6fdee0 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -62,6 +62,7 @@ import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType;
+import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndexClosedException; import org.elasticsearch.ingest.IngestService; @@ -81,7 +82,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.LongSupplier; -import java.util.function.Supplier; import java.util.stream.Collectors; import static java.util.Collections.emptyMap; @@ -114,7 +114,7 @@ public TransportBulkAction(ThreadPool threadPool, TransportService transportServ ClusterService clusterService, IngestService ingestService, NodeClient client, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, AutoCreateIndex autoCreateIndex, LongSupplier relativeTimeProvider) { - super(BulkAction.NAME, transportService, actionFilters, (Supplier) BulkRequest::new, ThreadPool.Names.WRITE); + super(BulkAction.NAME, transportService, actionFilters, BulkRequest::new, ThreadPool.Names.WRITE); Objects.requireNonNull(relativeTimeProvider); this.threadPool = threadPool; this.clusterService = clusterService; @@ -682,7 +682,8 @@ void markCurrentItemAsDropped() { new BulkItemResponse(currentSlot, indexRequest.opType(), new UpdateResponse( new ShardId(indexRequest.index(), IndexMetaData.INDEX_UUID_NA_VALUE, 0), - indexRequest.type(), indexRequest.id(), indexRequest.version(), DocWriteResponse.Result.NOOP + indexRequest.type(), indexRequest.id(), SequenceNumbers.UNASSIGNED_SEQ_NO, SequenceNumbers.UNASSIGNED_PRIMARY_TERM, + indexRequest.version(), DocWriteResponse.Result.NOOP ) ) ); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 6479af1c7afc5..8d0900995a3f0 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -24,12 +24,11 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.MessageSupplier; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.StreamableResponseActionType; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexRequest; @@ -51,6 +50,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentHelper; @@ -70,6 +70,7 @@ import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; +import java.io.IOException; import java.util.Map; import java.util.concurrent.Executor; import java.util.function.Consumer; @@ -79,12 +80,7 @@ public class TransportShardBulkAction extends TransportWriteAction { public static final String ACTION_NAME = BulkAction.NAME + "[s]"; - public static final ActionType TYPE = new 
StreamableResponseActionType<>(ACTION_NAME) { - @Override - public BulkShardResponse newResponse() { - return new BulkShardResponse(); - } - }; + public static final ActionType TYPE = new ActionType<>(ACTION_NAME, BulkShardResponse::new); private static final Logger logger = LogManager.getLogger(TransportShardBulkAction.class); @@ -108,8 +104,8 @@ protected TransportRequestOptions transportOptions(Settings settings) { } @Override - protected BulkShardResponse newResponseInstance() { - return new BulkShardResponse(); + protected BulkShardResponse newResponseInstance(StreamInput in) throws IOException { + return new BulkShardResponse(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/delete/DeleteAction.java b/server/src/main/java/org/elasticsearch/action/delete/DeleteAction.java index be299715095dc..60468993e4eec 100644 --- a/server/src/main/java/org/elasticsearch/action/delete/DeleteAction.java +++ b/server/src/main/java/org/elasticsearch/action/delete/DeleteAction.java @@ -19,19 +19,14 @@ package org.elasticsearch.action.delete; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class DeleteAction extends StreamableResponseActionType { +public class DeleteAction extends ActionType { public static final DeleteAction INSTANCE = new DeleteAction(); public static final String NAME = "indices:data/write/delete"; private DeleteAction() { - super(NAME); - } - - @Override - public DeleteResponse newResponse() { - return new DeleteResponse(); + super(NAME, DeleteResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java index 2622295545190..8074f7cbd6d42 100644 --- a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java @@ -289,11 +289,6 @@ public OpType opType() { return OpType.DELETE; } - @Override - public void readFrom(StreamInput in) { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java b/server/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java index 14b7f65239ba1..5961797e3a084 100644 --- a/server/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java +++ b/server/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.delete; import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; @@ -36,7 +37,8 @@ */ public class DeleteResponse extends DocWriteResponse { - public DeleteResponse() { + public DeleteResponse(StreamInput in) throws IOException { + super(in); } public DeleteResponse(ShardId shardId, String type, String id, long seqNo, long primaryTerm, long version, boolean found) { diff --git a/server/src/main/java/org/elasticsearch/action/explain/ExplainAction.java b/server/src/main/java/org/elasticsearch/action/explain/ExplainAction.java index 2eab7215fce46..8a30c1fb002cc 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/ExplainAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/explain/ExplainAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.explain; import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable; /** * Entry point for the explain feature. @@ -31,11 +30,7 @@ public class ExplainAction extends ActionType { public static final String NAME = "indices:data/read/explain"; private ExplainAction() { - super(NAME); + super(NAME, ExplainResponse::new); } - @Override - public Writeable.Reader getResponseReader() { - return ExplainResponse::new; - } } diff --git a/server/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java b/server/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java index 8ca34a0a1ee57..d06a5b74bc777 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java +++ b/server/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java @@ -91,7 +91,7 @@ public ExplainResponse(StreamInput in) throws IOException { explanation = readExplanation(in); } if (in.readBoolean()) { - getResult = GetResult.readGetResult(in); + getResult = new GetResult(in); } } @@ -136,11 +136,6 @@ public RestStatus status() { return exists ? RestStatus.OK : RestStatus.NOT_FOUND; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(index); diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesAction.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesAction.java index e64a3b67c102b..30bceef47e787 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesAction.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesAction.java @@ -19,19 +19,15 @@ package org.elasticsearch.action.fieldcaps; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class FieldCapabilitiesAction extends StreamableResponseActionType { +public class FieldCapabilitiesAction extends ActionType { public static final FieldCapabilitiesAction INSTANCE = new FieldCapabilitiesAction(); public static final String NAME = "indices:data/read/field_caps"; private FieldCapabilitiesAction() { - super(NAME); + super(NAME, FieldCapabilitiesResponse::new); } - @Override - public FieldCapabilitiesResponse newResponse() { - return new FieldCapabilitiesResponse(); - } } diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java index 6bbb822ed56d9..cc5c0921c6640 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java @@ -40,11 +40,11 @@ public class FieldCapabilitiesIndexResponse extends ActionResponse implements Wr this.responseMap = responseMap; } - FieldCapabilitiesIndexResponse() { - } - - FieldCapabilitiesIndexResponse(StreamInput input) throws IOException { - this.readFrom(input); + FieldCapabilitiesIndexResponse(StreamInput in) throws IOException { + super(in); + this.indexName = in.readString(); + this.responseMap = + in.readMap(StreamInput::readString, FieldCapabilities::new); } @@ -70,14 
+70,6 @@ public FieldCapabilities getField(String field) { return responseMap.get(field); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - this.indexName = in.readString(); - this.responseMap = - in.readMap(StreamInput::readString, FieldCapabilities::new); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(indexName); diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java index 75686cc6b0eae..442ed3f68ee9c 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java @@ -56,6 +56,19 @@ public final class FieldCapabilitiesRequest extends ActionRequest implements Ind PARSER.declareStringArray(fromList(String.class, FieldCapabilitiesRequest::fields), FIELDS_FIELD); } + public FieldCapabilitiesRequest(StreamInput in) throws IOException { + super(in); + fields = in.readStringArray(); + indices = in.readStringArray(); + indicesOptions = IndicesOptions.readIndicesOptions(in); + mergeResults = in.readBoolean(); + if (in.getVersion().onOrAfter(Version.V_7_2_0)) { + includeUnmapped = in.readBoolean(); + } else { + includeUnmapped = false; + } + } + public FieldCapabilitiesRequest() {} /** @@ -77,20 +90,6 @@ void setMergeResults(boolean mergeResults) { this.mergeResults = mergeResults; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - fields = in.readStringArray(); - indices = in.readStringArray(); - indicesOptions = IndicesOptions.readIndicesOptions(in); - mergeResults = in.readBoolean(); - if (in.getVersion().onOrAfter(Version.V_7_2_0)) { - includeUnmapped = in.readBoolean(); - } else { - includeUnmapped = false; - } - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java index 1d04d07da2932..0796d40dc9151 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java @@ -48,9 +48,9 @@ public class FieldCapabilitiesResponse extends ActionResponse implements ToXCont private static final ParseField INDICES_FIELD = new ParseField("indices"); private static final ParseField FIELDS_FIELD = new ParseField("fields"); - private String[] indices; - private Map> responseMap; - private List indexResponses; + private final String[] indices; + private final Map> responseMap; + private final List indexResponses; FieldCapabilitiesResponse(String[] indices, Map> responseMap) { this(indices, responseMap, Collections.emptyList()); @@ -67,6 +67,17 @@ private FieldCapabilitiesResponse(String[] indices, Map getField(String field) { return responseMap.get(field); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - if (in.getVersion().onOrAfter(Version.V_7_2_0)) { - indices = in.readStringArray(); - } else { - indices = Strings.EMPTY_ARRAY; - } - this.responseMap = in.readMap(StreamInput::readString, FieldCapabilitiesResponse::readField); - indexResponses = in.readList(FieldCapabilitiesIndexResponse::new); - } - private static Map 
readField(StreamInput in) throws IOException { return in.readMap(StreamInput::readString, FieldCapabilities::new); } diff --git a/server/src/main/java/org/elasticsearch/action/get/GetAction.java b/server/src/main/java/org/elasticsearch/action/get/GetAction.java index a36ec9d2fdc79..589d24e8aafc5 100644 --- a/server/src/main/java/org/elasticsearch/action/get/GetAction.java +++ b/server/src/main/java/org/elasticsearch/action/get/GetAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.get; import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable; public class GetAction extends ActionType { @@ -28,11 +27,7 @@ public class GetAction extends ActionType { public static final String NAME = "indices:data/read/get"; private GetAction() { - super(NAME); + super(NAME, GetResponse::new); } - @Override - public Writeable.Reader getResponseReader() { - return GetResponse::new; - } } diff --git a/server/src/main/java/org/elasticsearch/action/get/GetResponse.java b/server/src/main/java/org/elasticsearch/action/get/GetResponse.java index b7f16eeba4f76..c3362f7fc650c 100644 --- a/server/src/main/java/org/elasticsearch/action/get/GetResponse.java +++ b/server/src/main/java/org/elasticsearch/action/get/GetResponse.java @@ -50,7 +50,7 @@ public class GetResponse extends ActionResponse implements Iterable { +public class MultiGetAction extends ActionType { public static final MultiGetAction INSTANCE = new MultiGetAction(); public static final String NAME = "indices:data/read/mget"; private MultiGetAction() { - super(NAME); - } - - @Override - public MultiGetResponse newResponse() { - return new MultiGetResponse(); + super(NAME, MultiGetResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/get/MultiGetItemResponse.java b/server/src/main/java/org/elasticsearch/action/get/MultiGetItemResponse.java index e104359a65e7c..d2ae4be9f98df 100644 --- a/server/src/main/java/org/elasticsearch/action/get/MultiGetItemResponse.java +++ b/server/src/main/java/org/elasticsearch/action/get/MultiGetItemResponse.java @@ -21,27 +21,33 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import java.io.IOException; /** * A single multi get response. */ -public class MultiGetItemResponse implements Streamable { +public class MultiGetItemResponse implements Writeable { - private GetResponse response; - private MultiGetResponse.Failure failure; - - MultiGetItemResponse() { - - } + private final GetResponse response; + private final MultiGetResponse.Failure failure; public MultiGetItemResponse(GetResponse response, MultiGetResponse.Failure failure) { this.response = response; this.failure = failure; } + MultiGetItemResponse(StreamInput in) throws IOException { + if (in.readBoolean()) { + failure = new MultiGetResponse.Failure(in); + response = null; + } else { + response = new GetResponse(in); + failure = null; + } + } + /** * The index name of the document. 
*/ @@ -93,21 +99,6 @@ public MultiGetResponse.Failure getFailure() { return this.failure; } - public static MultiGetItemResponse readItemResponse(StreamInput in) throws IOException { - MultiGetItemResponse response = new MultiGetItemResponse(); - response.readFrom(in); - return response; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - if (in.readBoolean()) { - failure = MultiGetResponse.Failure.readFailure(in); - } else { - response = new GetResponse(in); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { if (failure != null) { diff --git a/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java b/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java index d7f69d06bbe7e..a9a6878998935 100644 --- a/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java +++ b/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java @@ -33,7 +33,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -67,7 +67,7 @@ public class MultiGetRequest extends ActionRequest /** * A single get item. */ - public static class Item implements Streamable, IndicesRequest, ToXContentObject { + public static class Item implements Writeable, IndicesRequest, ToXContentObject { private String index; private String type; @@ -82,6 +82,18 @@ public Item() { } + public Item(StreamInput in) throws IOException { + index = in.readString(); + type = in.readOptionalString(); + id = in.readString(); + routing = in.readOptionalString(); + storedFields = in.readOptionalStringArray(); + version = in.readLong(); + versionType = VersionType.fromValue(in.readByte()); + + fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new); + } + /** * Constructs a single get item. 
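Editor's note: MultiGetItemResponse above is an either/or payload, exactly one of failure or response is set, and its wire format is a leading boolean discriminator followed by the chosen branch. The reading constructor must mirror writeTo branch for branch. A self-contained sketch of that tagged encoding, with illustrative names only:

    // Hypothetical either/or payload with a boolean-tagged wire format.
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.common.io.stream.Writeable;

    import java.io.IOException;

    public class EitherItem implements Writeable {

        private final String success;  // set when the item succeeded
        private final String failure;  // set when the item failed

        public EitherItem(String success, String failure) {
            this.success = success;
            this.failure = failure;
        }

        EitherItem(StreamInput in) throws IOException {
            if (in.readBoolean()) {       // true on the wire means "failure"
                failure = in.readString();
                success = null;
            } else {
                success = in.readString();
                failure = null;
            }
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            if (failure != null) {
                out.writeBoolean(true);   // discriminator first, payload second
                out.writeString(failure);
            } else {
                out.writeBoolean(false);
                out.writeString(success);
            }
        }
    }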
* @@ -181,25 +193,6 @@ public Item fetchSourceContext(FetchSourceContext fetchSourceContext) { return this; } - public static Item readItem(StreamInput in) throws IOException { - Item item = new Item(); - item.readFrom(in); - return item; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - index = in.readString(); - type = in.readOptionalString(); - id = in.readString(); - routing = in.readOptionalString(); - storedFields = in.readOptionalStringArray(); - version = in.readLong(); - versionType = VersionType.fromValue(in.readByte()); - - fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(index); @@ -272,6 +265,21 @@ public String toString() { boolean refresh; List items = new ArrayList<>(); + public MultiGetRequest() {} + + public MultiGetRequest(StreamInput in) throws IOException { + super(in); + preference = in.readOptionalString(); + refresh = in.readBoolean(); + realtime = in.readBoolean(); + + int size = in.readVInt(); + items = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + items.add(new Item(in)); + } + } + public List getItems() { return this.items; } @@ -530,20 +538,6 @@ public Iterator iterator() { return Collections.unmodifiableCollection(items).iterator(); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - preference = in.readOptionalString(); - refresh = in.readBoolean(); - realtime = in.readBoolean(); - - int size = in.readVInt(); - items = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - items.add(Item.readItem(in)); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java b/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java index ca0f7aac32c64..78e7e92fa7af3 100644 --- a/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java +++ b/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java @@ -24,7 +24,7 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -48,15 +48,12 @@ public class MultiGetResponse extends ActionResponse implements Iterable { +public class IndexAction extends ActionType { public static final IndexAction INSTANCE = new IndexAction(); public static final String NAME = "indices:data/write/index"; private IndexAction() { - super(NAME); - } - - @Override - public IndexResponse newResponse() { - return new IndexResponse(); + super(NAME, IndexResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index 1ad029f616d02..aa5080d5e2a09 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -613,11 +613,6 @@ public void resolveRouting(MetaData metaData) { routing(metaData.resolveWriteIndexRouting(routing, index)); } - @Override - public void readFrom(StreamInput in) { - throw new 
UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java b/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java index 3174e4d8ab187..75ad2e106a05d 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java @@ -21,6 +21,7 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; @@ -37,7 +38,8 @@ */ public class IndexResponse extends DocWriteResponse { - public IndexResponse() { + public IndexResponse(StreamInput in) throws IOException { + super(in); } public IndexResponse(ShardId shardId, String type, String id, long seqNo, long primaryTerm, long version, boolean created) { diff --git a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineAction.java b/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineAction.java index 55565960c658c..464c601c7bc59 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineAction.java @@ -21,7 +21,6 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.common.io.stream.Writeable; public class DeletePipelineAction extends ActionType { @@ -29,11 +28,7 @@ public class DeletePipelineAction extends ActionType { public static final String NAME = "cluster:admin/ingest/pipeline/delete"; public DeletePipelineAction() { - super(NAME); + super(NAME, AcknowledgedResponse::new); } - @Override - public Writeable.Reader getResponseReader() { - return AcknowledgedResponse::new; - } } diff --git a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequest.java b/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequest.java index 65afad730a0ba..9133dee230343 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequest.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequest.java @@ -39,6 +39,11 @@ public DeletePipelineRequest(String id) { this.id = id; } + public DeletePipelineRequest(StreamInput in) throws IOException { + super(in); + id = in.readString(); + } + DeletePipelineRequest() { } @@ -55,12 +60,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - id = in.readString(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineTransportAction.java b/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineTransportAction.java index 8f69d20968014..5ea42e884434c 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineTransportAction.java @@ -44,7 +44,7 @@ public class DeletePipelineTransportAction extends TransportMasterNodeAction listener) throws 
Exception { diff --git a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineAction.java b/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineAction.java index 48340af4039c3..ebc43dfe663b9 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineAction.java @@ -19,19 +19,14 @@ package org.elasticsearch.action.ingest; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class GetPipelineAction extends StreamableResponseActionType { +public class GetPipelineAction extends ActionType { public static final GetPipelineAction INSTANCE = new GetPipelineAction(); public static final String NAME = "cluster:admin/ingest/pipeline/get"; public GetPipelineAction() { - super(NAME); - } - - @Override - public GetPipelineResponse newResponse() { - return new GetPipelineResponse(); + super(NAME, GetPipelineResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineRequest.java b/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineRequest.java index f34f157063cd3..0a0cc3f2244ab 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineRequest.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineRequest.java @@ -61,9 +61,4 @@ public String[] getIds() { public ActionRequestValidationException validate() { return null; } - - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } } diff --git a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineResponse.java b/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineResponse.java index 23641162fe35d..509e120c0aca4 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineResponse.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineResponse.java @@ -44,7 +44,13 @@ public class GetPipelineResponse extends ActionResponse implements StatusToXCont private List pipelines; - public GetPipelineResponse() { + public GetPipelineResponse(StreamInput in) throws IOException { + super(in); + int size = in.readVInt(); + pipelines = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + pipelines.add(PipelineConfiguration.readFrom(in)); + } } public GetPipelineResponse(List pipelines) { @@ -60,16 +66,6 @@ public List pipelines() { return Collections.unmodifiableList(pipelines); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - int size = in.readVInt(); - pipelines = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - pipelines.add(PipelineConfiguration.readFrom(in)); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(pipelines.size()); diff --git a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineTransportAction.java b/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineTransportAction.java index 4501629284e63..3e723bfb571c6 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineTransportAction.java @@ -28,11 +28,14 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import 
org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.ingest.IngestService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.IOException; + public class GetPipelineTransportAction extends TransportMasterNodeReadAction { @Inject @@ -49,8 +52,8 @@ protected String executor() { } @Override - protected GetPipelineResponse newResponse() { - return new GetPipelineResponse(); + protected GetPipelineResponse read(StreamInput in) throws IOException { + return new GetPipelineResponse(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineAction.java b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineAction.java index 1a2aa7725129a..f19f1f2b1ddb5 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineAction.java @@ -21,7 +21,6 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.common.io.stream.Writeable; public class PutPipelineAction extends ActionType { @@ -29,11 +28,7 @@ public class PutPipelineAction extends ActionType { public static final String NAME = "cluster:admin/ingest/pipeline/put"; public PutPipelineAction() { - super(NAME); + super(NAME, AcknowledgedResponse::new); } - @Override - public Writeable.Reader getResponseReader() { - return AcknowledgedResponse::new; - } } diff --git a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequest.java b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequest.java index 46a34717c89c0..212921f0e5a95 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequest.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequest.java @@ -46,6 +46,13 @@ public PutPipelineRequest(String id, BytesReference source, XContentType xConten this.xContentType = Objects.requireNonNull(xContentType); } + public PutPipelineRequest(StreamInput in) throws IOException { + super(in); + id = in.readString(); + source = in.readBytesReference(); + xContentType = in.readEnum(XContentType.class); + } + PutPipelineRequest() { } @@ -66,14 +73,6 @@ public XContentType getXContentType() { return xContentType; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - id = in.readString(); - source = in.readBytesReference(); - xContentType = in.readEnum(XContentType.class); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java index 95a9dbc7254b3..7250369fae4be 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java @@ -54,7 +54,7 @@ public PutPipelineTransportAction(ThreadPool threadPool, TransportService transp IngestService ingestService, NodeClient client) { super( PutPipelineAction.NAME, transportService, ingestService.getClusterService(), - threadPool, actionFilters, indexNameExpressionResolver, PutPipelineRequest::new + threadPool, actionFilters, PutPipelineRequest::new, indexNameExpressionResolver 
); this.client = client; this.ingestService = ingestService; @@ -70,11 +70,6 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { return new AcknowledgedResponse(in); } - @Override - protected AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected void masterOperation(Task task, PutPipelineRequest request, ClusterState state, ActionListener listener) throws Exception { diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineAction.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineAction.java index befa729282e7e..3e76c96834c8a 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineAction.java @@ -19,19 +19,14 @@ package org.elasticsearch.action.ingest; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class SimulatePipelineAction extends StreamableResponseActionType { +public class SimulatePipelineAction extends ActionType { public static final SimulatePipelineAction INSTANCE = new SimulatePipelineAction(); public static final String NAME = "cluster:admin/ingest/pipeline/simulate"; public SimulatePipelineAction() { - super(NAME); - } - - @Override - public SimulatePipelineResponse newResponse() { - return new SimulatePipelineResponse(); + super(NAME, SimulatePipelineResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java index eb15b56db31cc..de0b0c18a9113 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java @@ -102,11 +102,6 @@ public XContentType getXContentType() { return xContentType; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineResponse.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineResponse.java index c1efaf6c54ba6..8597605879d59 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineResponse.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineResponse.java @@ -103,8 +103,21 @@ public class SimulatePipelineResponse extends ActionResponse implements ToXConte new ParseField(Fields.DOCUMENTS)); } - public SimulatePipelineResponse() { - + public SimulatePipelineResponse(StreamInput in) throws IOException { + super(in); + this.pipelineId = in.readOptionalString(); + boolean verbose = in.readBoolean(); + int responsesLength = in.readVInt(); + results = new ArrayList<>(); + for (int i = 0; i < responsesLength; i++) { + SimulateDocumentResult simulateDocumentResult; + if (verbose) { + simulateDocumentResult = new SimulateDocumentVerboseResult(in); + } else { + simulateDocumentResult = new SimulateDocumentBaseResult(in); + } + results.add(simulateDocumentResult); + } } public SimulatePipelineResponse(String pipelineId, boolean verbose, List responses) { @@ -135,24 +148,6 @@ public void 
writeTo(StreamOutput out) throws IOException { } } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - this.pipelineId = in.readOptionalString(); - boolean verbose = in.readBoolean(); - int responsesLength = in.readVInt(); - results = new ArrayList<>(); - for (int i = 0; i < responsesLength; i++) { - SimulateDocumentResult simulateDocumentResult; - if (verbose) { - simulateDocumentResult = new SimulateDocumentVerboseResult(in); - } else { - simulateDocumentResult = new SimulateDocumentBaseResult(in); - } - results.add(simulateDocumentResult); - } - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/server/src/main/java/org/elasticsearch/action/main/MainAction.java b/server/src/main/java/org/elasticsearch/action/main/MainAction.java index 535c63d2a8800..e8633c018fbf1 100644 --- a/server/src/main/java/org/elasticsearch/action/main/MainAction.java +++ b/server/src/main/java/org/elasticsearch/action/main/MainAction.java @@ -19,19 +19,14 @@ package org.elasticsearch.action.main; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class MainAction extends StreamableResponseActionType { +public class MainAction extends ActionType { public static final String NAME = "cluster:monitor/main"; public static final MainAction INSTANCE = new MainAction(); public MainAction() { - super(NAME); - } - - @Override - public MainResponse newResponse() { - return new MainResponse(); + super(NAME, MainResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/main/MainRequest.java b/server/src/main/java/org/elasticsearch/action/main/MainRequest.java index 1736e56a8dc06..ac8026fb21f10 100644 --- a/server/src/main/java/org/elasticsearch/action/main/MainRequest.java +++ b/server/src/main/java/org/elasticsearch/action/main/MainRequest.java @@ -21,12 +21,20 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; public class MainRequest extends ActionRequest { + public MainRequest() {} + + MainRequest(StreamInput in) throws IOException { + super(in); + } + @Override public ActionRequestValidationException validate() { return null; } - } diff --git a/server/src/main/java/org/elasticsearch/action/main/MainResponse.java b/server/src/main/java/org/elasticsearch/action/main/MainResponse.java index 3e071f5cd048f..aaffbeacfba51 100644 --- a/server/src/main/java/org/elasticsearch/action/main/MainResponse.java +++ b/server/src/main/java/org/elasticsearch/action/main/MainResponse.java @@ -42,7 +42,15 @@ public class MainResponse extends ActionResponse implements ToXContentObject { private String clusterUuid; private Build build; - MainResponse() { + MainResponse() {} + + MainResponse(StreamInput in) throws IOException { + super(in); + nodeName = in.readString(); + version = Version.readVersion(in); + clusterName = new ClusterName(in); + clusterUuid = in.readString(); + build = Build.readBuild(in); } public MainResponse(String nodeName, Version version, ClusterName clusterName, String clusterUuid, Build build) { @@ -83,16 +91,6 @@ public void writeTo(StreamOutput out) throws IOException { Build.writeBuild(build, out); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - nodeName = in.readString(); - version = 
Version.readVersion(in); - clusterName = new ClusterName(in); - clusterUuid = in.readString(); - build = Build.readBuild(in); - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java b/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java index 78b87435a4f34..c72acddadf6ec 100644 --- a/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java +++ b/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java @@ -64,11 +64,6 @@ public Translog.Operation[] getOperations() { return operations; } - @Override - public void readFrom(final StreamInput in) { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(final StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationResponse.java b/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationResponse.java index f3dbea0476327..2180ef7d6ef29 100644 --- a/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationResponse.java +++ b/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationResponse.java @@ -20,9 +20,18 @@ import org.elasticsearch.action.support.WriteResponse; import org.elasticsearch.action.support.replication.ReplicationResponse; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; public final class ResyncReplicationResponse extends ReplicationResponse implements WriteResponse { + public ResyncReplicationResponse() {} + + public ResyncReplicationResponse(StreamInput in) throws IOException { + super(in); + } + @Override public void setForcedRefresh(boolean forcedRefresh) { // ignore diff --git a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java index 464cd3168bf7e..095f5e27a62fb 100644 --- a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java @@ -63,8 +63,8 @@ public TransportResyncReplicationAction(Settings settings, TransportService tran } @Override - protected ResyncReplicationResponse newResponseInstance() { - return new ResyncReplicationResponse(); + protected ResyncReplicationResponse newResponseInstance(StreamInput in) throws IOException { + return new ResyncReplicationResponse(in); } @Override @@ -137,9 +137,7 @@ public void sync(ResyncReplicationRequest request, Task parentTask, String prima new TransportResponseHandler() { @Override public ResyncReplicationResponse read(StreamInput in) throws IOException { - ResyncReplicationResponse response = newResponseInstance(); - response.readFrom(in); - return response; + return newResponseInstance(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/search/ClearScrollAction.java b/server/src/main/java/org/elasticsearch/action/search/ClearScrollAction.java index c7b959ed0bd44..00d594d01845e 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ClearScrollAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/ClearScrollAction.java @@ -19,19 +19,14 @@ package org.elasticsearch.action.search; 
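Editor's note: the TransportResyncReplicationAction hunk above shows what the transport handlers gain from this migration: newResponseInstance() followed by response.readFrom(in) collapses into a single constructor call, so read() no longer handles a half-built mutable instance. A sketch under the assumption that the hypothetical ExampleResponse from the earlier note is the response type:

    // Hypothetical reader helper showing the post-migration read path.
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.Writeable;

    import java.io.IOException;

    final class ExampleReaders {

        // Before: newResponseInstance() + response.readFrom(in).
        // After: the reader is just a constructor reference.
        static final Writeable.Reader<ExampleResponse> READER = ExampleResponse::new;

        static ExampleResponse read(StreamInput in) throws IOException {
            return READER.read(in);
        }
    }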
-import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class ClearScrollAction extends StreamableResponseActionType { +public class ClearScrollAction extends ActionType { public static final ClearScrollAction INSTANCE = new ClearScrollAction(); public static final String NAME = "indices:data/read/scroll/clear"; private ClearScrollAction() { - super(NAME); - } - - @Override - public ClearScrollResponse newResponse() { - return new ClearScrollResponse(); + super(NAME, ClearScrollResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/ClearScrollRequest.java b/server/src/main/java/org/elasticsearch/action/search/ClearScrollRequest.java index 4770818867c84..c287429983e11 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ClearScrollRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/ClearScrollRequest.java @@ -38,6 +38,13 @@ public class ClearScrollRequest extends ActionRequest implements ToXContentObjec private List scrollIds; + public ClearScrollRequest() {} + + public ClearScrollRequest(StreamInput in) throws IOException { + super(in); + scrollIds = Arrays.asList(in.readStringArray()); + } + public List getScrollIds() { return scrollIds; } @@ -70,12 +77,6 @@ public ActionRequestValidationException validate() { return validationException; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - scrollIds = Arrays.asList(in.readStringArray()); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/search/ClearScrollResponse.java b/server/src/main/java/org/elasticsearch/action/search/ClearScrollResponse.java index cf2cc9a0a9f44..32ab3bf045ffb 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ClearScrollResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/ClearScrollResponse.java @@ -48,15 +48,18 @@ public class ClearScrollResponse extends ActionResponse implements StatusToXCont PARSER.declareField(constructorArg(), (parser, context) -> parser.intValue(), NUMFREED, ObjectParser.ValueType.INT); } - private boolean succeeded; - private int numFreed; + private final boolean succeeded; + private final int numFreed; public ClearScrollResponse(boolean succeeded, int numFreed) { this.succeeded = succeeded; this.numFreed = numFreed; } - ClearScrollResponse() { + public ClearScrollResponse(StreamInput in) throws IOException { + super(in); + succeeded = in.readBoolean(); + numFreed = in.readVInt(); } /** @@ -95,13 +98,6 @@ public static ClearScrollResponse fromXContent(XContentParser parser) throws IOE return PARSER.apply(parser, null); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - succeeded = in.readBoolean(); - numFreed = in.readVInt(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(succeeded); diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchAction.java index dd34dd94e69f7..d9b50cd7c4253 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.ActionType; -import 
org.elasticsearch.common.io.stream.Writeable; public class MultiSearchAction extends ActionType { @@ -28,11 +27,6 @@ public class MultiSearchAction extends ActionType { public static final String NAME = "indices:data/read/msearch"; private MultiSearchAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return MultiSearchResponse::new; + super(NAME, MultiSearchResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java index 25500efd5ed5b..e6e11e5eeb5c4 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java @@ -56,10 +56,12 @@ public class MultiSearchRequest extends ActionRequest implements CompositeIndice public static final int MAX_CONCURRENT_SEARCH_REQUESTS_DEFAULT = 0; private int maxConcurrentSearchRequests = 0; - private List requests = new ArrayList<>(); + private final List requests = new ArrayList<>(); private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpenAndForbidClosedIgnoreThrottled(); + public MultiSearchRequest() {} + /** * Add a search request to execute. Note, the order is important, the search response will be returned in the * same order as the search requests. @@ -129,9 +131,9 @@ public MultiSearchRequest indicesOptions(IndicesOptions indicesOptions) { return this; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); + + public MultiSearchRequest(StreamInput in) throws IOException { + super(in); maxConcurrentSearchRequests = in.readVInt(); int size = in.readVInt(); for (int i = 0; i < size; i++) { diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java index 3fa2a57c8386d..e20d419823e9d 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java @@ -153,11 +153,6 @@ public TimeValue getTook() { return new TimeValue(tookInMillis); } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(items.length); diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchAction.java index 402219001a28d..d4bbe197d6cd4 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable; public class SearchAction extends ActionType { @@ -28,11 +27,7 @@ public class SearchAction extends ActionType { public static final String NAME = "indices:data/read/search"; private SearchAction() { - super(NAME); + super(NAME, SearchResponse::new); } - @Override - public Writeable.Reader getResponseReader() { - return SearchResponse::new; - } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 
602853e10b292..8ba9a8c9f0bd7 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -581,11 +581,6 @@ public String getDescription() { }; } - @Override - public void readFrom(StreamInput in) { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public boolean equals(Object o) { if (this == o) { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java index c69b67d5f4586..cb36dbd0cd8f2 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java @@ -360,11 +360,6 @@ static SearchResponse innerFromXContent(XContentParser parser) throws IOExceptio failures.toArray(ShardSearchFailure.EMPTY_ARRAY), clusters); } - @Override - public void readFrom(StreamInput in) { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { internalResponse.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAction.java index 32c84f86ea038..9cdf83c25b11b 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable; public class SearchScrollAction extends ActionType { @@ -28,11 +27,7 @@ public class SearchScrollAction extends ActionType { public static final String NAME = "indices:data/read/scroll"; private SearchScrollAction() { - super(NAME); + super(NAME, SearchResponse::new); } - @Override - public Writeable.Reader getResponseReader() { - return SearchResponse::new; - } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java index 68f6a6afce091..5815df7cb96fa 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java @@ -112,11 +112,6 @@ public SearchScrollRequest scroll(String keepAlive) { return scroll(new Scroll(TimeValue.parseTimeValue(keepAlive, null, getClass().getSimpleName() + ".keepAlive"))); } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { return new SearchTask(id, type, action, getDescription(), parentTaskId, headers); diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index 04e198915b3b2..37c8fe4fcbd85 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -220,11 +220,7 @@ public long id() { return this.id; } - @Override - 
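Editor's note: the SearchTransportService hunk that follows also changes the registerRequestHandler parameter order: the request reader (a Writeable.Reader, here TransportRequest.Empty::new) now comes after the executor name, where a Supplier used to come first. A sketch of the new call shape; ACTION_NAME and the no-op handler body are hypothetical:

    // Hypothetical registration mirroring the new registerRequestHandler order.
    import org.elasticsearch.threadpool.ThreadPool;
    import org.elasticsearch.transport.TransportRequest;
    import org.elasticsearch.transport.TransportResponse;
    import org.elasticsearch.transport.TransportService;

    final class ExampleRegistration {

        static final String ACTION_NAME = "internal:example/noop";

        static void register(TransportService transportService) {
            // executor name first, then the request reader, then the handler
            transportService.registerRequestHandler(ACTION_NAME, ThreadPool.Names.SAME,
                TransportRequest.Empty::new,
                (request, channel, task) -> channel.sendResponse(TransportResponse.Empty.INSTANCE));
        }
    }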
public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } - } static class SearchFreeContextRequest extends ScrollFreeContextRequest implements IndicesRequest { private OriginalIndices originalIndices; @@ -264,19 +260,12 @@ public IndicesOptions indicesOptions() { return originalIndices.indicesOptions(); } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } - } public static class SearchFreeContextResponse extends TransportResponse { private boolean freed; - SearchFreeContextResponse() { - } - SearchFreeContextResponse(StreamInput in) throws IOException { freed = in.readBoolean(); } @@ -289,12 +278,6 @@ public boolean isFreed() { return freed; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - freed = in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(freed); @@ -314,8 +297,9 @@ public static void registerRequestHandler(TransportService transportService, Sea channel.sendResponse(new SearchFreeContextResponse(freed)); }); TransportActionProxy.registerProxyAction(transportService, FREE_CONTEXT_ACTION_NAME, SearchFreeContextResponse::new); - transportService.registerRequestHandler(CLEAR_SCROLL_CONTEXTS_ACTION_NAME, () -> TransportRequest.Empty.INSTANCE, - ThreadPool.Names.SAME, (request, channel, task) -> { + transportService.registerRequestHandler(CLEAR_SCROLL_CONTEXTS_ACTION_NAME, ThreadPool.Names.SAME, + TransportRequest.Empty::new, + (request, channel, task) -> { searchService.freeAllScrollContexts(); channel.sendResponse(TransportResponse.Empty.INSTANCE); }); diff --git a/server/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java b/server/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java index cfd23e3c77368..373a4685bc99b 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java +++ b/server/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java @@ -54,7 +54,15 @@ public class ShardSearchFailure extends ShardOperationFailedException { private SearchShardTarget shardTarget; - ShardSearchFailure() { + ShardSearchFailure(StreamInput in) throws IOException { + shardTarget = in.readOptionalWriteable(SearchShardTarget::new); + if (shardTarget != null) { + index = shardTarget.getFullyQualifiedIndexName(); + shardId = shardTarget.getShardId().getId(); + } + reason = in.readString(); + status = RestStatus.readFrom(in); + cause = in.readException(); } public ShardSearchFailure(Exception e) { @@ -91,21 +99,8 @@ public String toString() { } public static ShardSearchFailure readShardSearchFailure(StreamInput in) throws IOException { - ShardSearchFailure shardSearchFailure = new ShardSearchFailure(); - shardSearchFailure.readFrom(in); - return shardSearchFailure; - } + return new ShardSearchFailure(in); - @Override - public void readFrom(StreamInput in) throws IOException { - shardTarget = in.readOptionalWriteable(SearchShardTarget::new); - if (shardTarget != null) { - index = shardTarget.getFullyQualifiedIndexName(); - shardId = shardTarget.getShardId().getId(); - } - reason = in.readString(); - status = RestStatus.readFrom(in); - cause = in.readException(); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java 
b/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java index a2fa140c149e0..66aa15c569279 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java @@ -27,6 +27,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -51,7 +52,7 @@ public class TransportMultiSearchAction extends HandledTransportAction) MultiSearchRequest::new); this.threadPool = threadPool; this.clusterService = clusterService; this.availableProcessors = EsExecutors.numberOfProcessors(settings); @@ -62,7 +63,7 @@ public TransportMultiSearchAction(Settings settings, ThreadPool threadPool, Tran TransportMultiSearchAction(ThreadPool threadPool, ActionFilters actionFilters, TransportService transportService, ClusterService clusterService, int availableProcessors, LongSupplier relativeTimeProvider, NodeClient client) { - super(MultiSearchAction.NAME, transportService, actionFilters, MultiSearchRequest::new); + super(MultiSearchAction.NAME, transportService, actionFilters, (Writeable.Reader) MultiSearchRequest::new); this.threadPool = threadPool; this.clusterService = clusterService; this.availableProcessors = availableProcessors; @@ -85,6 +86,7 @@ protected void doExecute(Task task, MultiSearchRequest request, ActionListener searchRequestSlots = new ConcurrentLinkedQueue<>(); for (int i = 0; i < request.requests().size(); i++) { SearchRequest searchRequest = request.requests().get(i); + searchRequest.setParentTask(client.getLocalNodeId(), task.getId()); searchRequestSlots.add(new SearchRequestSlot(searchRequest, i)); } diff --git a/server/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java b/server/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java index aa3e91c634a80..f8a3baf96e2a9 100644 --- a/server/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java +++ b/server/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java @@ -51,7 +51,10 @@ public class DefaultShardOperationFailedException extends ShardOperationFailedEx PARSER.declareObject(constructorArg(), (p, c) -> ElasticsearchException.fromXContent(p), new ParseField(REASON)); } - protected DefaultShardOperationFailedException() { + protected DefaultShardOperationFailedException() {} + + protected DefaultShardOperationFailedException(StreamInput in) throws IOException { + readFrom(in, this); } public DefaultShardOperationFailedException(ElasticsearchException e) { @@ -64,17 +67,14 @@ public DefaultShardOperationFailedException(String index, int shardId, Throwable } public static DefaultShardOperationFailedException readShardOperationFailed(StreamInput in) throws IOException { - DefaultShardOperationFailedException exp = new DefaultShardOperationFailedException(); - exp.readFrom(in); - return exp; + return new DefaultShardOperationFailedException(in); } - @Override - public void readFrom(StreamInput in) throws IOException { - index = in.readOptionalString(); - shardId = in.readVInt(); - cause = in.readException(); - status = 
RestStatus.readFrom(in); + public static void readFrom(StreamInput in, DefaultShardOperationFailedException f) throws IOException { + f.index = in.readOptionalString(); + f.shardId = in.readVInt(); + f.cause = in.readException(); + f.status = RestStatus.readFrom(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java b/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java index ca10583ce248a..4ac545ff41ecf 100644 --- a/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java @@ -27,17 +27,11 @@ import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportService; -import java.util.function.Supplier; - /** * A TransportAction that self registers a handler into the transport service */ public abstract class HandledTransportAction extends TransportAction { - protected HandledTransportAction(String actionName, TransportService transportService, - ActionFilters actionFilters, Supplier request) { - this(actionName, true, transportService, actionFilters, request); - } protected HandledTransportAction(String actionName, TransportService transportService, ActionFilters actionFilters, Writeable.Reader requestReader) { @@ -49,20 +43,6 @@ protected HandledTransportAction(String actionName, TransportService transportSe this(actionName, true, transportService, actionFilters, requestReader, executor); } - protected HandledTransportAction(String actionName, boolean canTripCircuitBreaker, - TransportService transportService, ActionFilters actionFilters, Supplier request) { - super(actionName, actionFilters, transportService.getTaskManager()); - transportService.registerRequestHandler(actionName, request, ThreadPool.Names.SAME, false, canTripCircuitBreaker, - new TransportHandler()); - } - - protected HandledTransportAction(String actionName, TransportService transportService, ActionFilters actionFilters, - Supplier request, String executor) { - super(actionName, actionFilters, transportService.getTaskManager()); - transportService.registerRequestHandler(actionName, request, executor, false, true, - new TransportHandler()); - } - protected HandledTransportAction(String actionName, boolean canTripCircuitBreaker, TransportService transportService, ActionFilters actionFilters, Writeable.Reader requestReader) { diff --git a/server/src/main/java/org/elasticsearch/action/support/WriteRequest.java b/server/src/main/java/org/elasticsearch/action/support/WriteRequest.java index 50edcd39bd16e..a163f0f63d047 100644 --- a/server/src/main/java/org/elasticsearch/action/support/WriteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/WriteRequest.java @@ -26,7 +26,6 @@ import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import java.io.IOException; @@ -35,7 +34,7 @@ * Interface implemented by requests that modify the documents in an index like {@link IndexRequest}, {@link UpdateRequest}, and * {@link BulkRequest}. Rather than implement this directly most implementers should extend {@link ReplicatedWriteRequest}. 
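Editor's note: DefaultShardOperationFailedException above takes a different route than the constructor-only classes because its fields must stay mutable for subclasses; the instance readFrom becomes a static readFrom(StreamInput, instance) helper that both the base constructor and subclasses can call. A sketch of a subclass reusing it; ExampleShardFailure and its extra field are hypothetical:

    // Hypothetical subclass reusing the static readFrom helper shown above.
    import org.elasticsearch.action.support.DefaultShardOperationFailedException;
    import org.elasticsearch.common.io.stream.StreamInput;

    import java.io.IOException;

    public class ExampleShardFailure extends DefaultShardOperationFailedException {

        private final String extra;

        public ExampleShardFailure(StreamInput in) throws IOException {
            // Populates index, shardId, cause and status on this instance.
            readFrom(in, this);
            extra = in.readOptionalString();
            // The matching writeTo override (super.writeTo + writeOptionalString)
            // is elided here for brevity.
        }
    }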
*/ -public interface WriteRequest> extends Streamable { +public interface WriteRequest> extends Writeable { /** * Should this request trigger a refresh ({@linkplain RefreshPolicy#IMMEDIATE}), wait for a refresh ( * {@linkplain RefreshPolicy#WAIT_UNTIL}), or proceed ignore refreshes entirely ({@linkplain RefreshPolicy#NONE}, the default). diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastRequest.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastRequest.java index d19b28036b92b..e39301b4a6ff1 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastRequest.java @@ -33,16 +33,13 @@ public class BroadcastRequest> extends protected String[] indices; private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpenAndForbidClosed(); - public BroadcastRequest() { - } - public BroadcastRequest(StreamInput in) throws IOException { super(in); indices = in.readStringArray(); indicesOptions = IndicesOptions.readIndicesOptions(in); } - protected BroadcastRequest(String[] indices) { + protected BroadcastRequest(String... indices) { this.indices = indices; } @@ -85,11 +82,4 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringArrayNullable(indices); indicesOptions.writeIndicesOptions(out); } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - indices = in.readStringArray(); - indicesOptions = IndicesOptions.readIndicesOptions(in); - } } diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java index 1cf1488f82236..9f5474c916d74 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java @@ -66,7 +66,19 @@ protected static void declareBroadcastFields(Const PARSER.declareObject(constructorArg(), shardsParser, _SHARDS_FIELD); } - public BroadcastResponse() { + public BroadcastResponse() {} + + public BroadcastResponse(StreamInput in) throws IOException { + totalShards = in.readVInt(); + successfulShards = in.readVInt(); + failedShards = in.readVInt(); + int size = in.readVInt(); + if (size > 0) { + shardFailures = new DefaultShardOperationFailedException[size]; + for (int i = 0; i < size; i++) { + shardFailures[i] = readShardOperationFailed(in); + } + } } public BroadcastResponse(int totalShards, int successfulShards, int failedShards, @@ -120,21 +132,6 @@ public DefaultShardOperationFailedException[] getShardFailures() { return shardFailures; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - totalShards = in.readVInt(); - successfulShards = in.readVInt(); - failedShards = in.readVInt(); - int size = in.readVInt(); - if (size > 0) { - shardFailures = new DefaultShardOperationFailedException[size]; - for (int i = 0; i < size; i++) { - shardFailures[i] = readShardOperationFailed(in); - } - } - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(totalShards); diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardRequest.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardRequest.java index f61ff4ac748b0..df6781aa0cc88 100644 --- 
a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardRequest.java @@ -35,7 +35,12 @@ public abstract class BroadcastShardRequest extends TransportRequest implements protected OriginalIndices originalIndices; - public BroadcastShardRequest() { + protected BroadcastShardRequest() {} + + public BroadcastShardRequest(StreamInput in) throws IOException { + super(in); + shardId = new ShardId(in); + originalIndices = OriginalIndices.readOriginalIndices(in); } protected BroadcastShardRequest(ShardId shardId, BroadcastRequest> request) { @@ -57,13 +62,6 @@ public IndicesOptions indicesOptions() { return originalIndices.indicesOptions(); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - shardId = new ShardId(in); - originalIndices = OriginalIndices.readOriginalIndices(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardResponse.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardResponse.java index 53966727a53b5..de935066758da 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardResponse.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardResponse.java @@ -30,8 +30,9 @@ public abstract class BroadcastShardResponse extends TransportResponse { ShardId shardId; - protected BroadcastShardResponse() { - + protected BroadcastShardResponse(StreamInput in) throws IOException { + super(in); + shardId = new ShardId(in); } protected BroadcastShardResponse(ShardId shardId) { @@ -46,12 +47,6 @@ public ShardId getShardId() { return this.shardId; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - shardId = new ShardId(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { shardId.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java index 15daaf786b604..3a29006066a0a 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java @@ -37,6 +37,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; @@ -48,7 +49,6 @@ import java.io.IOException; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReferenceArray; -import java.util.function.Supplier; public abstract class TransportBroadcastAction< Request extends BroadcastRequest, @@ -66,8 +66,8 @@ public abstract class TransportBroadcastAction< protected TransportBroadcastAction(String actionName, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, Supplier request, - Supplier shardRequest, String shardExecutor) { + IndexNameExpressionResolver 
indexNameExpressionResolver, Writeable.Reader request, + Writeable.Reader shardRequest, String shardExecutor) { super(actionName, transportService, actionFilters, request); this.clusterService = clusterService; this.transportService = transportService; @@ -75,7 +75,7 @@ protected TransportBroadcastAction(String actionName, ClusterService clusterServ this.transportShardAction = actionName + "[s]"; this.shardExecutor = shardExecutor; - transportService.registerRequestHandler(transportShardAction, shardRequest, ThreadPool.Names.SAME, new ShardTransportHandler()); + transportService.registerRequestHandler(transportShardAction, ThreadPool.Names.SAME, shardRequest, new ShardTransportHandler()); } @Override @@ -87,7 +87,7 @@ protected void doExecute(Task task, Request request, ActionListener li protected abstract ShardRequest newShardRequest(int numShards, ShardRouting shard, Request request); - protected abstract ShardResponse newShardResponse(); + protected abstract ShardResponse readShardResponse(StreamInput in) throws IOException; protected abstract ShardResponse shardOperation(ShardRequest request, Task task) throws IOException; @@ -180,9 +180,7 @@ protected void performOperation(final ShardIterator shardIt, final ShardRouting new TransportResponseHandler() { @Override public ShardResponse read(StreamInput in) throws IOException { - ShardResponse response = newShardResponse(); - response.readFrom(in); - return response; + return readShardResponse(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java index f3916873a24f9..b5fc85d6234e5 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java @@ -42,7 +42,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.NodeShouldNotConnectException; @@ -61,7 +61,6 @@ import java.util.Map; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReferenceArray; -import java.util.function.Supplier; /** * Abstraction for transporting aggregated shard-level operations in a single request (NodeRequest) per-node @@ -76,7 +75,7 @@ */ public abstract class TransportBroadcastByNodeAction, Response extends BroadcastResponse, - ShardOperationResult extends Streamable> extends HandledTransportAction { + ShardOperationResult extends Writeable> extends HandledTransportAction { private final ClusterService clusterService; private final TransportService transportService; @@ -90,7 +89,7 @@ public TransportBroadcastByNodeAction( TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - Supplier request, + Writeable.Reader request, String executor) { this(actionName, clusterService, transportService, actionFilters, indexNameExpressionResolver, request, executor, true); } @@ -101,7 +100,7 @@ public TransportBroadcastByNodeAction( TransportService transportService, ActionFilters 
actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - Supplier request, + Writeable.Reader request, String executor, boolean canTripCircuitBreaker) { super(actionName, canTripCircuitBreaker, transportService, actionFilters, request); @@ -112,7 +111,7 @@ public TransportBroadcastByNodeAction( transportNodeBroadcastAction = actionName + "[n]"; - transportService.registerRequestHandler(transportNodeBroadcastAction, NodeRequest::new, executor, false, canTripCircuitBreaker, + transportService.registerRequestHandler(transportNodeBroadcastAction, executor, false, canTripCircuitBreaker, NodeRequest::new, new BroadcastByNodeTransportRequestHandler()); } @@ -314,9 +313,7 @@ private void sendNodeRequest(final DiscoveryNode node, List shards transportService.sendRequest(node, transportNodeBroadcastAction, nodeRequest, new TransportResponseHandler() { @Override public NodeResponse read(StreamInput in) throws IOException { - NodeResponse nodeResponse = new NodeResponse(); - nodeResponse.readFrom(in); - return nodeResponse; + return new NodeResponse(in); } @Override @@ -455,7 +452,11 @@ public class NodeRequest extends TransportRequest implements IndicesRequest { protected Request indicesLevelRequest; - public NodeRequest() { + public NodeRequest(StreamInput in) throws IOException { + super(in); + indicesLevelRequest = readRequestFrom(in); + shards = in.readList(ShardRouting::new); + nodeId = in.readString(); } public NodeRequest(String nodeId, Request request, List shards) { @@ -482,14 +483,6 @@ public IndicesOptions indicesOptions() { return indicesLevelRequest.indicesOptions(); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - indicesLevelRequest = readRequestFrom(in); - shards = in.readList(ShardRouting::new); - nodeId = in.readString(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -505,7 +498,16 @@ class NodeResponse extends TransportResponse { protected List exceptions; protected List results; - NodeResponse() { + NodeResponse(StreamInput in) throws IOException { + super(in); + nodeId = in.readString(); + totalShards = in.readVInt(); + results = in.readList((stream) -> stream.readBoolean() ? readShardResult(stream) : null); + if (in.readBoolean()) { + exceptions = in.readList(BroadcastShardOperationFailedException::new); + } else { + exceptions = null; + } } NodeResponse(String nodeId, @@ -534,26 +536,13 @@ public List getExceptions() { return exceptions; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - nodeId = in.readString(); - totalShards = in.readVInt(); - results = in.readList((stream) -> stream.readBoolean() ? readShardResult(stream) : null); - if (in.readBoolean()) { - exceptions = in.readList(BroadcastShardOperationFailedException::new); - } else { - exceptions = null; - } - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(nodeId); out.writeVInt(totalShards); out.writeVInt(results.size()); for (ShardOperationResult result : results) { - out.writeOptionalStreamable(result); + out.writeOptionalWriteable(result); } out.writeBoolean(exceptions != null); if (exceptions != null) { @@ -566,19 +555,15 @@ public void writeTo(StreamOutput out) throws IOException { * Can be used for implementations of {@link #shardOperation(BroadcastRequest, ShardRouting) shardOperation} for * which there is no shard-level return value. 
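// writeOptionalStreamable needed a blank instance plus readFrom on the
// reading side; writeOptionalWriteable keeps the identical wire format (one
// presence boolean, then the payload) against a Writeable.Reader, so the swap
// in NodeResponse.writeTo above is wire-compatible. A round-trip sketch for a
// hypothetical Writeable ShardResult type:

import java.io.IOException;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;

class OptionalRoundTrip {
    static ShardResult copyOf(ShardResult result) throws IOException {
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            out.writeOptionalWriteable(result); // null-safe: writes "false" and stops for null
            try (StreamInput in = out.bytes().streamInput()) {
                return in.readOptionalWriteable(ShardResult::new); // null when the bit was false
            }
        }
    }
}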
*/ - public static final class EmptyResult implements Streamable { + public static final class EmptyResult implements Writeable { public static EmptyResult INSTANCE = new EmptyResult(); - private EmptyResult() { - } + private EmptyResult() {} - @Override - public void readFrom(StreamInput in) throws IOException { - } + private EmptyResult(StreamInput in) {} @Override - public void writeTo(StreamOutput out) throws IOException { - } + public void writeTo(StreamOutput out) { } public static EmptyResult readEmptyResultFrom(StreamInput in) { return INSTANCE; diff --git a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java index 93a259a443679..c4c5cd7101db5 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java @@ -81,12 +81,6 @@ public TimeValue ackTimeout() { return timeout; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - timeout = in.readTimeValue(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java index 3752d36fb5516..923a93396a2e1 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java @@ -64,11 +64,6 @@ public final boolean isAcknowledged() { return acknowledged; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(acknowledged); diff --git a/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeReadRequest.java b/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeReadRequest.java index 3045bef46b8d1..2f08b9dbcc100 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeReadRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeReadRequest.java @@ -59,9 +59,4 @@ public final Request local(boolean local) { public final boolean local() { return local; } - - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } } diff --git a/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java b/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java index 2bf342603a2c9..071b271f9dd05 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java @@ -68,13 +68,4 @@ public final Request masterNodeTimeout(String timeout) { public final TimeValue masterNodeTimeout() { return this.masterNodeTimeout; } - - @Override - public void readFrom(StreamInput in) throws IOException { - // TODO(talevy): throw exception once all MasterNodeRequest - // subclasses have been migrated to Writeable Readers - super.readFrom(in); - masterNodeTimeout = 
in.readTimeValue(); - } - } diff --git a/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java b/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java index 03c7346df5aab..e90d0a3743d86 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java @@ -37,7 +37,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.MasterNotDiscoveredException; @@ -50,7 +49,6 @@ import java.io.IOException; import java.util.function.Predicate; -import java.util.function.Supplier; /** * A base class for operations that needs to be performed on the master node. @@ -65,30 +63,12 @@ public abstract class TransportMasterNodeAction request) { - this(actionName, true, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, request); - } - protected TransportMasterNodeAction(String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, Writeable.Reader request, IndexNameExpressionResolver indexNameExpressionResolver) { this(actionName, true, transportService, clusterService, threadPool, actionFilters, request, indexNameExpressionResolver); } - protected TransportMasterNodeAction(String actionName, boolean canTripCircuitBreaker, - TransportService transportService, ClusterService clusterService, ThreadPool threadPool, - ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - Supplier request) { - super(actionName, canTripCircuitBreaker, transportService, actionFilters, request); - this.transportService = transportService; - this.clusterService = clusterService; - this.threadPool = threadPool; - this.indexNameExpressionResolver = indexNameExpressionResolver; - this.executor = executor(); - } - protected TransportMasterNodeAction(String actionName, boolean canTripCircuitBreaker, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, Writeable.Reader request, @@ -103,20 +83,7 @@ protected TransportMasterNodeAction(String actionName, boolean canTripCircuitBre protected abstract String executor(); - /** - * @deprecated new implementors should override {@link #read(StreamInput)} and use the - * {@link Writeable.Reader} interface. - * @return a new response instance. Typically this is used for serialization using the - * {@link Streamable#readFrom(StreamInput)} method. 
- */ - @Deprecated - protected abstract Response newResponse(); - - protected Response read(StreamInput in) throws IOException { - Response response = newResponse(); - response.readFrom(in); - return response; - } + protected abstract Response read(StreamInput in) throws IOException; protected abstract void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) throws Exception; diff --git a/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeReadAction.java b/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeReadAction.java index 93c7729a9d236..194439019c9a0 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeReadAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeReadAction.java @@ -27,8 +27,6 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.util.function.Supplier; - /** * A base class for read operations that needs to be performed on the master node. * Can also be executed on the local node if needed. @@ -36,25 +34,12 @@ public abstract class TransportMasterNodeReadAction, Response extends ActionResponse> extends TransportMasterNodeAction { - protected TransportMasterNodeReadAction(String actionName, TransportService transportService, - ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, Supplier request) { - this(actionName, true, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver,request); - } - protected TransportMasterNodeReadAction(String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, Writeable.Reader request, IndexNameExpressionResolver indexNameExpressionResolver) { this(actionName, true, transportService, clusterService, threadPool, actionFilters, request, indexNameExpressionResolver); } - protected TransportMasterNodeReadAction(String actionName, boolean checkSizeLimit, TransportService transportService, - ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, Supplier request) { - super(actionName, checkSizeLimit, transportService, clusterService, threadPool, actionFilters, - indexNameExpressionResolver,request); - } - protected TransportMasterNodeReadAction(String actionName, boolean checkSizeLimit, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, Writeable.Reader request, IndexNameExpressionResolver indexNameExpressionResolver) { diff --git a/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java b/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java index 9c730d243e3b6..d41d9dcec926f 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java @@ -86,14 +86,4 @@ public String[] types() { public IndicesOptions indicesOptions() { return indicesOptions; } - - @Override - public void readFrom(StreamInput in) throws IOException { - // TODO(talevy): once all ClusterInfoRequest objects are converted, remove this - super.readFrom(in); - indices = in.readStringArray(); - 
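// With the Streamable bridge gone, read(StreamInput) is abstract on
// TransportMasterNodeAction: every subclass must name its response
// deserializer explicitly. In practice the override is a one-liner delegating
// to the response's stream constructor, sketched here for a hypothetical
// PingResponse inside such a subclass:

@Override
protected PingResponse read(StreamInput in) throws IOException {
    return new PingResponse(in);
}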
types = in.readStringArray(); - indicesOptions = IndicesOptions.readIndicesOptions(in); - // throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } } diff --git a/server/src/main/java/org/elasticsearch/action/support/nodes/BaseNodeRequest.java b/server/src/main/java/org/elasticsearch/action/support/nodes/BaseNodeRequest.java index 21b09bdaaae0a..3b40ddfd65b78 100644 --- a/server/src/main/java/org/elasticsearch/action/support/nodes/BaseNodeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/nodes/BaseNodeRequest.java @@ -31,9 +31,8 @@ public abstract class BaseNodeRequest extends TransportRequest { public BaseNodeRequest() {} - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); + public BaseNodeRequest(StreamInput in) throws IOException { + super(in); if (in.getVersion().before(Version.V_7_3_0)) { in.readString(); // previously nodeId } diff --git a/server/src/main/java/org/elasticsearch/action/support/nodes/BaseNodeResponse.java b/server/src/main/java/org/elasticsearch/action/support/nodes/BaseNodeResponse.java index be66f158315d8..4035ff172cb65 100644 --- a/server/src/main/java/org/elasticsearch/action/support/nodes/BaseNodeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/support/nodes/BaseNodeResponse.java @@ -33,7 +33,9 @@ public abstract class BaseNodeResponse extends TransportResponse { private DiscoveryNode node; - protected BaseNodeResponse() { + protected BaseNodeResponse(StreamInput in) throws IOException { + super(in); + node = new DiscoveryNode(in); } protected BaseNodeResponse(DiscoveryNode node) { @@ -48,12 +50,6 @@ public DiscoveryNode getNode() { return node; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - node = new DiscoveryNode(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { node.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesRequest.java b/server/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesRequest.java index ca2932291fd3a..558f0d3e56ccc 100644 --- a/server/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesRequest.java @@ -50,8 +50,11 @@ public abstract class BaseNodesRequest private TimeValue timeout; - protected BaseNodesRequest() { - + protected BaseNodesRequest(StreamInput in) throws IOException { + super(in); + nodesIds = in.readStringArray(); + concreteNodes = in.readOptionalArray(DiscoveryNode::new, DiscoveryNode[]::new); + timeout = in.readOptionalTimeValue(); } protected BaseNodesRequest(String... 
nodesIds) { @@ -101,14 +104,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - nodesIds = in.readStringArray(); - concreteNodes = in.readOptionalArray(DiscoveryNode::new, DiscoveryNode[]::new); - timeout = in.readOptionalTimeValue(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesResponse.java b/server/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesResponse.java index e65070cc7a1a8..312c0607c4d1f 100644 --- a/server/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesResponse.java +++ b/server/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesResponse.java @@ -38,7 +38,11 @@ public abstract class BaseNodesResponse private List nodes; private Map nodesMap; - protected BaseNodesResponse() { + protected BaseNodesResponse(StreamInput in) throws IOException { + super(in); + clusterName = new ClusterName(in); + nodes = readNodesFrom(in); + failures = in.readList(FailedNodeException::new); } protected BaseNodesResponse(ClusterName clusterName, List nodes, List failures) { @@ -100,14 +104,6 @@ public Map getNodesMap() { return nodesMap; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - clusterName = new ClusterName(in); - nodes = readNodesFrom(in); - failures = in.readList(FailedNodeException::new); - } - @Override public void writeTo(StreamOutput out) throws IOException { clusterName.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java b/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java index 43f661084a89d..304213066c790 100644 --- a/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.NodeShouldNotConnectException; @@ -46,7 +47,6 @@ import java.util.Objects; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReferenceArray; -import java.util.function.Supplier; public abstract class TransportNodesAction, NodesResponse extends BaseNodesResponse, @@ -63,7 +63,7 @@ public abstract class TransportNodesAction request, Supplier nodeRequest, String nodeExecutor, + Writeable.Reader request, Writeable.Reader nodeRequest, String nodeExecutor, Class nodeResponseClass) { super(actionName, transportService, actionFilters, request); this.threadPool = threadPool; @@ -74,7 +74,7 @@ protected TransportNodesAction(String actionName, ThreadPool threadPool, this.transportNodeAction = actionName + "[n]"; transportService.registerRequestHandler( - transportNodeAction, nodeRequest, nodeExecutor, new NodeTransportHandler()); + transportNodeAction, nodeExecutor, nodeRequest, new NodeTransportHandler()); } @Override @@ -121,7 +121,7 @@ protected NodesResponse newResponse(NodesRequest request, AtomicReferenceArray n protected abstract NodeRequest newNodeRequest(NodesRequest request); - 
protected abstract NodeResponse newNodeResponse(); + protected abstract NodeResponse newNodeResponse(StreamInput in) throws IOException; protected abstract NodeResponse nodeOperation(NodeRequest request, Task task); @@ -179,9 +179,7 @@ void start() { new TransportResponseHandler() { @Override public NodeResponse read(StreamInput in) throws IOException { - NodeResponse nodeResponse = newNodeResponse(); - nodeResponse.readFrom(in); - return nodeResponse; + return newNodeResponse(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicatedWriteRequest.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicatedWriteRequest.java index 25089d02b799f..28867791fc285 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicatedWriteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicatedWriteRequest.java @@ -61,11 +61,6 @@ public RefreshPolicy getRefreshPolicy() { return refreshPolicy; } - @Override - public void readFrom(StreamInput in) { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java index 857103071e022..8a7f4df17be25 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java @@ -187,11 +187,6 @@ public ActionRequestValidationException validate() { return validationException; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java index 008b0095fb8d2..f9c1b11d1d41b 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java @@ -27,7 +27,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -50,10 +50,11 @@ public class ReplicationResponse extends ActionResponse { private ShardInfo shardInfo; - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - shardInfo = ReplicationResponse.ShardInfo.readShardInfo(in); + public ReplicationResponse() {} + + public ReplicationResponse(StreamInput in) throws IOException { + super(in); + shardInfo = new ReplicationResponse.ShardInfo(in); } @Override @@ -69,7 +70,7 @@ public void setShardInfo(ShardInfo shardInfo) { this.shardInfo = shardInfo; } - public static class ShardInfo implements Streamable, 
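// When a readFrom body moves into a constructor, the cheapest regression
// check is a serialization round trip: write with writeTo, rebuild through
// the new StreamInput constructor, and compare field by field. A sketch
// reusing the hypothetical PingShardRequest from above:

import java.io.IOException;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;

class RoundTripCheck {
    static PingShardRequest roundTrip(PingShardRequest original) throws IOException {
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            original.writeTo(out);
            try (StreamInput in = out.bytes().streamInput()) {
                return new PingShardRequest(in); // should match the original, field for field
            }
        }
    }
}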
ToXContentObject { + public static class ShardInfo implements Writeable, ToXContentObject { private static final String TOTAL = "total"; private static final String SUCCESSFUL = "successful"; @@ -80,7 +81,16 @@ public static class ShardInfo implements Streamable, ToXContentObject { private int successful; private Failure[] failures = EMPTY; - public ShardInfo() { + public ShardInfo() {} + + public ShardInfo(StreamInput in) throws IOException { + total = in.readVInt(); + successful = in.readVInt(); + int size = in.readVInt(); + failures = new Failure[size]; + for (int i = 0; i < size; i++) { + failures[i] = new Failure(in); + } } public ShardInfo(int total, int successful, Failure... failures) { @@ -130,19 +140,6 @@ public RestStatus status() { return status; } - @Override - public void readFrom(StreamInput in) throws IOException { - total = in.readVInt(); - successful = in.readVInt(); - int size = in.readVInt(); - failures = new Failure[size]; - for (int i = 0; i < size; i++) { - Failure failure = new Failure(); - failure.readFrom(in); - failures[i] = failure; - } - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(total); @@ -217,12 +214,6 @@ public String toString() { '}'; } - static ShardInfo readShardInfo(StreamInput in) throws IOException { - ShardInfo shardInfo = new ShardInfo(); - shardInfo.readFrom(in); - return shardInfo; - } - public static class Failure extends ShardOperationFailedException implements ToXContentObject { private static final String _INDEX = "_index"; @@ -236,6 +227,16 @@ public static class Failure extends ShardOperationFailedException implements ToX private String nodeId; private boolean primary; + public Failure(StreamInput in) throws IOException { + shardId = new ShardId(in); + super.shardId = shardId.getId(); + index = shardId.getIndexName(); + nodeId = in.readOptionalString(); + cause = in.readException(); + status = RestStatus.readFrom(in); + primary = in.readBoolean(); + } + public Failure(ShardId shardId, @Nullable String nodeId, Exception cause, RestStatus status, boolean primary) { super(shardId.getIndexName(), shardId.getId(), ExceptionsHelper.detailedMessage(cause), status, cause); this.shardId = shardId; @@ -266,17 +267,6 @@ public boolean primary() { return primary; } - @Override - public void readFrom(StreamInput in) throws IOException { - shardId = new ShardId(in); - super.shardId = shardId.getId(); - index = shardId.getIndexName(); - nodeId = in.readOptionalString(); - cause = in.readException(); - status = RestStatus.readFrom(in); - primary = in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { shardId.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java index 8ffc3cb92240d..3ea898d7218c8 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java @@ -22,6 +22,7 @@ import com.carrotsearch.hppc.cursors.IntObjectCursor; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DefaultShardOperationFailedException; import 
org.elasticsearch.action.support.HandledTransportAction; @@ -29,6 +30,7 @@ import org.elasticsearch.action.support.broadcast.BroadcastRequest; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -53,15 +55,17 @@ public abstract class TransportBroadcastReplicationAction, ShardResponse extends ReplicationResponse> extends HandledTransportAction { - private final TransportReplicationAction replicatedBroadcastShardAction; + private final ActionType replicatedBroadcastShardAction; private final ClusterService clusterService; private final IndexNameExpressionResolver indexNameExpressionResolver; + private final NodeClient client; public TransportBroadcastReplicationAction(String name, Writeable.Reader requestReader, ClusterService clusterService, - TransportService transportService, + TransportService transportService, NodeClient client, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - TransportReplicationAction replicatedBroadcastShardAction) { + ActionType replicatedBroadcastShardAction) { super(name, transportService, actionFilters, requestReader); + this.client = client; this.replicatedBroadcastShardAction = replicatedBroadcastShardAction; this.clusterService = clusterService; this.indexNameExpressionResolver = indexNameExpressionResolver; @@ -115,7 +119,7 @@ public void onFailure(Exception e) { protected void shardExecute(Task task, Request request, ShardId shardId, ActionListener shardActionListener) { ShardRequest shardRequest = newShardRequest(request, shardId); shardRequest.setParentTask(clusterService.localNode().getId(), task.getId()); - replicatedBroadcastShardAction.execute(shardRequest, shardActionListener); + client.executeLocally(replicatedBroadcastShardAction, shardRequest, shardActionListener); } /** diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 0301c37cdeaf2..1fe2abf6698bd 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -168,7 +168,7 @@ protected ReplicationOperation.Replicas newReplicasProxy() { return new ReplicasProxy(); } - protected abstract Response newResponseInstance(); + protected abstract Response newResponseInstance(StreamInput in) throws IOException; /** * Resolves derived values in the request. For example, the target shard id of the incoming request, if not set at request construction. @@ -341,11 +341,7 @@ void runWithPrimaryShardReference(final PrimaryShardReference primaryShardRefere // phase is executed on local shard and all subsequent operations are executed on relocation target as primary phase. 
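// TransportBroadcastReplicationAction now depends only on the ActionType of
// its per-shard action plus a NodeClient, rather than on the concrete
// TransportReplicationAction instance; the client resolves and runs the
// registered transport action in-process. A sketch of that dispatch with the
// generic parameters elided (names hypothetical):

void executeShardLevel(NodeClient client, ActionType<ReplicationResponse> shardAction,
                       ReplicationRequest<?> shardRequest,
                       ActionListener<ReplicationResponse> listener) {
    client.executeLocally(shardAction, shardRequest, listener); // no wire round trip
}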
final ShardRouting primary = primaryShardReference.routingEntry(); assert primary.relocating() : "indexShard is marked as relocated but routing isn't" + primary; - final Writeable.Reader reader = in -> { - Response response = TransportReplicationAction.this.newResponseInstance(); - response.readFrom(in); - return response; - }; + final Writeable.Reader reader = TransportReplicationAction.this::newResponseInstance; DiscoveryNode relocatingNode = clusterState.nodes().get(primary.relocatingNodeId()); transportService.sendRequest(relocatingNode, transportPrimaryAction, new ConcreteShardRequest<>(primaryRequest.getRequest(), primary.allocationId().getRelocationId(), @@ -551,7 +547,7 @@ public void onNewClusterState(ClusterState state) { // opportunity to execute custom logic before the replica operation begins transportService.sendRequest(clusterService.localNode(), transportReplicaAction, replicaRequest, - new ActionListenerResponseHandler<>(onCompletionListener, in -> new ReplicaResponse())); + new ActionListenerResponseHandler<>(onCompletionListener, ReplicaResponse::new)); } @Override @@ -749,9 +745,7 @@ private void performAction(final DiscoveryNode node, final String action, final @Override public Response read(StreamInput in) throws IOException { - Response response = newResponseInstance(); - response.readFrom(in); - return response; + return newResponseInstance(in); } @Override @@ -963,8 +957,10 @@ public static class ReplicaResponse extends ActionResponse implements Replicatio private long localCheckpoint; private long globalCheckpoint; - ReplicaResponse() { - + ReplicaResponse(StreamInput in) throws IOException { + super(in); + localCheckpoint = in.readZLong(); + globalCheckpoint = in.readZLong(); } public ReplicaResponse(long localCheckpoint, long globalCheckpoint) { @@ -978,13 +974,6 @@ public ReplicaResponse(long localCheckpoint, long globalCheckpoint) { this.globalCheckpoint = globalCheckpoint; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - localCheckpoint = in.readZLong(); - globalCheckpoint = in.readZLong(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeZLong(localCheckpoint); @@ -1040,11 +1029,8 @@ public void performOn( } final ConcreteReplicaRequest replicaRequest = new ConcreteReplicaRequest<>( request, replica.allocationId().getId(), primaryTerm, globalCheckpoint, maxSeqNoOfUpdatesOrDeletes); - final ActionListenerResponseHandler handler = new ActionListenerResponseHandler<>(listener, in -> { - ReplicaResponse replicaResponse = new ReplicaResponse(); - replicaResponse.readFrom(in); - return replicaResponse; - }); + final ActionListenerResponseHandler handler = new ActionListenerResponseHandler<>(listener, + ReplicaResponse::new); transportService.sendRequest(node, transportReplicaAction, replicaRequest, transportOptions, handler); } @@ -1114,11 +1100,6 @@ public String getDescription() { return "[" + request.getDescription() + "] for aID [" + targetAllocationID + "] and term [" + primaryTerm + "]"; } - @Override - public void readFrom(StreamInput in) { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(targetAllocationID); @@ -1162,11 +1143,6 @@ public ConcreteReplicaRequest(final R request, final String targetAllocationID, this.maxSeqNoOfUpdatesOrDeletes = maxSeqNoOfUpdatesOrDeletes; } - @Override - public void readFrom(StreamInput in) { - throw new 
UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java b/server/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java index d1a93bde83f40..89513d6e7e2a1 100644 --- a/server/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java @@ -48,6 +48,18 @@ public abstract class InstanceShardOperationRequest, @@ -66,7 +66,7 @@ public abstract class TransportInstanceSingleOperationAction< protected TransportInstanceSingleOperationAction(String actionName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - Supplier request) { + Writeable.Reader request) { super(actionName, transportService, actionFilters, request); this.threadPool = threadPool; this.clusterService = clusterService; @@ -74,7 +74,7 @@ protected TransportInstanceSingleOperationAction(String actionName, ThreadPool t this.indexNameExpressionResolver = indexNameExpressionResolver; this.executor = executor(); this.shardActionName = actionName + "[s]"; - transportService.registerRequestHandler(shardActionName, request, executor, new ShardTransportHandler()); + transportService.registerRequestHandler(shardActionName, executor, request, new ShardTransportHandler()); } @Override @@ -86,7 +86,7 @@ protected void doExecute(Task task, Request request, ActionListener li protected abstract void shardOperation(Request request, ActionListener listener); - protected abstract Response newResponse(); + protected abstract Response newResponse(StreamInput in) throws IOException; protected ClusterBlockException checkGlobalBlock(ClusterState state) { return state.blocks().globalBlockedException(ClusterBlockLevel.WRITE); @@ -183,9 +183,7 @@ protected void doStart(ClusterState clusterState) { @Override public Response read(StreamInput in) throws IOException { - Response response = newResponse(); - response.readFrom(in); - return response; + return newResponse(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java index 707cbbeb1e3ae..a20e72d853af5 100644 --- a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java @@ -265,9 +265,7 @@ private void start() { new TransportResponseHandler() { @Override public NodeTasksResponse read(StreamInput in) throws IOException { - NodeTasksResponse response = new NodeTasksResponse(); - response.readFrom(in); - return response; + return new NodeTasksResponse(in); } @Override @@ -368,28 +366,8 @@ private class NodeTasksResponse extends TransportResponse { protected List exceptions; protected List results; - NodeTasksResponse() { - } - - NodeTasksResponse(String nodeId, - List results, - List exceptions) { - this.nodeId = nodeId; - this.results = results; - this.exceptions = exceptions; - } - - public String getNodeId() { - return nodeId; - } - - public List getExceptions() { - return exceptions; - } - - 
@Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); + NodeTasksResponse(StreamInput in) throws IOException { + super(in); nodeId = in.readString(); int resultsSize = in.readVInt(); results = new ArrayList<>(resultsSize); @@ -408,6 +386,22 @@ public void readFrom(StreamInput in) throws IOException { } } + NodeTasksResponse(String nodeId, + List results, + List exceptions) { + this.nodeId = nodeId; + this.results = results; + this.exceptions = exceptions; + } + + public String getNodeId() { + return nodeId; + } + + public List getExceptions() { + return exceptions; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(nodeId); diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsAction.java b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsAction.java index e02969856e9b0..6b4014ed77b76 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsAction.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsAction.java @@ -19,19 +19,14 @@ package org.elasticsearch.action.termvectors; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class MultiTermVectorsAction extends StreamableResponseActionType { +public class MultiTermVectorsAction extends ActionType { public static final MultiTermVectorsAction INSTANCE = new MultiTermVectorsAction(); public static final String NAME = "indices:data/read/mtv"; private MultiTermVectorsAction() { - super(NAME); - } - - @Override - public MultiTermVectorsResponse newResponse() { - return new MultiTermVectorsResponse(); + super(NAME, MultiTermVectorsResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsItemResponse.java b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsItemResponse.java index 9cacf166952fb..fa67259e9e4ce 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsItemResponse.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsItemResponse.java @@ -21,21 +21,17 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import java.io.IOException; /** * A single multi get response. */ -public class MultiTermVectorsItemResponse implements Streamable { +public class MultiTermVectorsItemResponse implements Writeable { - private TermVectorsResponse response; - private MultiTermVectorsResponse.Failure failure; - - MultiTermVectorsItemResponse() { - - } + private final TermVectorsResponse response; + private final MultiTermVectorsResponse.Failure failure; public MultiTermVectorsItemResponse(TermVectorsResponse response, MultiTermVectorsResponse.Failure failure) { assert (((response == null) && (failure != null)) || ((response != null) && (failure == null))); @@ -43,6 +39,16 @@ public MultiTermVectorsItemResponse(TermVectorsResponse response, MultiTermVecto this.failure = failure; } + MultiTermVectorsItemResponse(StreamInput in) throws IOException { + if (in.readBoolean()) { + failure = new MultiTermVectorsResponse.Failure(in); + response = null; + } else { + response = new TermVectorsResponse(in); + failure = null; + } + } + /** * The index name of the document. 
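// StreamableResponseActionType existed only so legacy actions could allocate
// a blank response and call readFrom on it; once the response has a
// StreamInput constructor, a plain ActionType carries the reader itself. The
// conversion pattern, sketched for a hypothetical PingAction:

public class PingAction extends ActionType<PingResponse> {
    public static final PingAction INSTANCE = new PingAction();
    public static final String NAME = "indices:data/read/ping";

    private PingAction() {
        super(NAME, PingResponse::new); // Writeable.Reader<PingResponse>
    }
}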
*/ @@ -84,21 +90,6 @@ public MultiTermVectorsResponse.Failure getFailure() { return this.failure; } - public static MultiTermVectorsItemResponse readItemResponse(StreamInput in) throws IOException { - MultiTermVectorsItemResponse response = new MultiTermVectorsItemResponse(); - response.readFrom(in); - return response; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - if (in.readBoolean()) { - failure = MultiTermVectorsResponse.Failure.readFailure(in); - } else { - response = new TermVectorsResponse(in); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { if (failure != null) { diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java index 96a207ca1a595..954e540e913a9 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java @@ -46,6 +46,18 @@ public class MultiTermVectorsRequest extends ActionRequest final Set ids = new HashSet<>(); + public MultiTermVectorsRequest(StreamInput in) throws IOException { + super(in); + preference = in.readOptionalString(); + int size = in.readVInt(); + requests = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + requests.add(new TermVectorsRequest(in)); + } + } + + public MultiTermVectorsRequest() {} + public MultiTermVectorsRequest add(TermVectorsRequest termVectorsRequest) { requests.add(termVectorsRequest); return this; @@ -132,17 +144,6 @@ public void add(TermVectorsRequest template, @Nullable XContentParser parser) th } } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - preference = in.readOptionalString(); - int size = in.readVInt(); - requests = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - requests.add(new TermVectorsRequest(in)); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java index 200ba68f8aaba..5546230f979ef 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java @@ -24,7 +24,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -37,14 +37,10 @@ public class MultiTermVectorsResponse extends ActionResponse implements Iterable /** * Represents a failure. 
*/ - public static class Failure implements Streamable { - private String index; - private String id; - private Exception cause; - - Failure() { - - } + public static class Failure implements Writeable { + private final String index; + private final String id; + private final Exception cause; public Failure(String index, String id, Exception cause) { this.index = index; @@ -52,6 +48,19 @@ public Failure(String index, String id, Exception cause) { this.cause = cause; } + public Failure(StreamInput in) throws IOException { + index = in.readString(); + if (in.getVersion().before(Version.V_8_0_0)) { + // types no longer relevant so ignore + String type = in.readOptionalString(); + if (type != null) { + throw new IllegalStateException("types are no longer supported but found [" + type + "]"); + } + } + id = in.readString(); + cause = in.readException(); + } + /** * The index name of the action. */ @@ -73,26 +82,6 @@ public Exception getCause() { return this.cause; } - public static Failure readFailure(StreamInput in) throws IOException { - Failure failure = new Failure(); - failure.readFrom(in); - return failure; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - index = in.readString(); - if (in.getVersion().before(Version.V_8_0_0)) { - // types no longer relevant so ignore - String type = in.readOptionalString(); - if (type != null) { - throw new IllegalStateException("types are no longer supported but found [" + type + "]"); - } - } - id = in.readString(); - cause = in.readException(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(index); @@ -105,15 +94,20 @@ public void writeTo(StreamOutput out) throws IOException { } } - private MultiTermVectorsItemResponse[] responses; - - MultiTermVectorsResponse() { - } + private final MultiTermVectorsItemResponse[] responses; public MultiTermVectorsResponse(MultiTermVectorsItemResponse[] responses) { this.responses = responses; } + public MultiTermVectorsResponse(StreamInput in) throws IOException { + super(in); + responses = new MultiTermVectorsItemResponse[in.readVInt()]; + for (int i = 0; i < responses.length; i++) { + responses[i] = new MultiTermVectorsItemResponse(in); + } + } + public MultiTermVectorsItemResponse[] getResponses() { return this.responses; } @@ -152,15 +146,6 @@ static final class Fields { static final String _ID = "_id"; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - responses = new MultiTermVectorsItemResponse[in.readVInt()]; - for (int i = 0; i < responses.length; i++) { - responses[i] = MultiTermVectorsItemResponse.readItemResponse(in); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(responses.length); diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsShardResponse.java b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsShardResponse.java index d319e23cf511d..e3ed0fb562b3a 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsShardResponse.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsShardResponse.java @@ -54,7 +54,7 @@ public class MultiTermVectorsShardResponse extends ActionResponse { responses.add(null); } if (in.readBoolean()) { - failures.add(MultiTermVectorsResponse.Failure.readFailure(in)); + failures.add(new MultiTermVectorsResponse.Failure(in)); } else { failures.add(null); } @@ -73,11 +73,6 @@ public void add(int 
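// Stream constructors keep the version-gating that readFrom had:
// in.getVersion() is the version of the node that produced the bytes, so
// fields removed in newer versions are still consumed when reading from older
// nodes. The shape of the guard in Failure(StreamInput) above, for a
// hypothetical failure type (the real hunk additionally rejects a non-null
// legacy type):

class HypotheticalFailure {
    final String index;
    final String id;
    final Exception cause;

    HypotheticalFailure(StreamInput in) throws IOException {
        index = in.readString();
        if (in.getVersion().before(Version.V_8_0_0)) {
            in.readOptionalString(); // legacy type field: consume and discard
        }
        id = in.readString();
        cause = in.readException();
    }
}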
location, MultiTermVectorsResponse.Failure failure) { failures.add(failure); } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(locations.size()); diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsAction.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsAction.java index a6696a28608d6..1e5a94bd08cba 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsAction.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.termvectors; import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable; public class TermVectorsAction extends ActionType { @@ -28,11 +27,7 @@ public class TermVectorsAction extends ActionType { public static final String NAME = "indices:data/read/tv"; private TermVectorsAction() { - super(NAME); + super(NAME, TermVectorsResponse::new); } - @Override - public Writeable.Reader getResponseReader() { - return TermVectorsResponse::new; - } } diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java index 2cd1e3cccd7e5..09977ee8249b6 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java @@ -145,11 +145,6 @@ private boolean hasTermVectors() { return headerRef != null; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - public Fields getFields() throws IOException { if (hasTermVectors() && isExists()) { if (!sourceCopied) { // make the bytes safe diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java b/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java index e49ea292c61c8..f7aa31f23d503 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.StreamableResponseActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.action.support.single.shard.TransportSingleShardAction; @@ -45,12 +44,7 @@ public class TransportShardMultiTermsVectorAction extends private final IndicesService indicesService; private static final String ACTION_NAME = MultiTermVectorsAction.NAME + "[shard]"; - public static final ActionType TYPE = new StreamableResponseActionType<>(ACTION_NAME) { - @Override - public MultiTermVectorsShardResponse newResponse() { - return new MultiTermVectorsShardResponse(); - } - }; + public static final ActionType TYPE = new ActionType<>(ACTION_NAME, MultiTermVectorsShardResponse::new); @Inject public TransportShardMultiTermsVectorAction(ClusterService clusterService, TransportService 
transportService, diff --git a/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java b/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java index f2a2034d14ccd..1a2dd0a53c759 100644 --- a/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java +++ b/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java @@ -45,6 +45,7 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; @@ -56,6 +57,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.IOException; import java.util.Collections; import java.util.Map; @@ -89,8 +91,8 @@ protected String executor() { } @Override - protected UpdateResponse newResponse() { - return new UpdateResponse(); + protected UpdateResponse newResponse(StreamInput in) throws IOException { + return new UpdateResponse(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateAction.java b/server/src/main/java/org/elasticsearch/action/update/UpdateAction.java index f24f5436f5e4e..c97aa74aee671 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateAction.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateAction.java @@ -19,19 +19,14 @@ package org.elasticsearch.action.update; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class UpdateAction extends StreamableResponseActionType { +public class UpdateAction extends ActionType { public static final UpdateAction INSTANCE = new UpdateAction(); public static final String NAME = "indices:data/write/update"; private UpdateAction() { - super(NAME); - } - - @Override - public UpdateResponse newResponse() { - return new UpdateResponse(); + super(NAME, UpdateResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java index c6e45af0e6a89..7e0ff56f80442 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java @@ -30,7 +30,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; @@ -140,7 +140,7 @@ Result prepareUpsert(ShardId shardId, UpdateRequest request, final GetResult get break; case NONE: UpdateResponse update = new UpdateResponse(shardId, getResult.getType(), getResult.getId(), - getResult.getVersion(), DocWriteResponse.Result.NOOP); + getResult.getSeqNo(), getResult.getPrimaryTerm(), getResult.getVersion(), DocWriteResponse.Result.NOOP); update.setGetResult(getResult); return new Result(update, DocWriteResponse.Result.NOOP, upsertResult.v2(), XContentType.JSON); default: @@ -194,7 +194,7 @@ Result 
prepareUpdateIndexRequest(ShardId shardId, UpdateRequest request, GetResu // where users repopulating multi-fields or adding synonyms, etc. if (detectNoop && noop) { UpdateResponse update = new UpdateResponse(shardId, getResult.getType(), getResult.getId(), - getResult.getVersion(), DocWriteResponse.Result.NOOP); + getResult.getSeqNo(), getResult.getPrimaryTerm(), getResult.getVersion(), DocWriteResponse.Result.NOOP); update.setGetResult(extractGetResult(request, request.index(), getResult.getSeqNo(), getResult.getPrimaryTerm(), getResult.getVersion(), updatedSourceAsMap, updateSourceContentType, getResult.internalSourceRef())); return new Result(update, DocWriteResponse.Result.NOOP, updatedSourceAsMap, updateSourceContentType); @@ -257,7 +257,7 @@ Result prepareUpdateScriptRequest(ShardId shardId, UpdateRequest request, GetRes default: // If it was neither an INDEX or DELETE operation, treat it as a noop UpdateResponse update = new UpdateResponse(shardId, getResult.getType(), getResult.getId(), - getResult.getVersion(), DocWriteResponse.Result.NOOP); + getResult.getSeqNo(), getResult.getPrimaryTerm(), getResult.getVersion(), DocWriteResponse.Result.NOOP); update.setGetResult(extractGetResult(request, request.index(), getResult.getSeqNo(), getResult.getPrimaryTerm(), getResult.getVersion(), updatedSourceAsMap, updateSourceContentType, getResult.internalSourceRef())); return new Result(update, DocWriteResponse.Result.NOOP, updatedSourceAsMap, updateSourceContentType); @@ -311,12 +311,12 @@ public static GetResult extractGetResult(final UpdateRequest request, String con public static class Result { - private final Streamable action; + private final Writeable action; private final DocWriteResponse.Result result; private final Map updatedSourceAsMap; private final XContentType updateSourceContentType; - public Result(Streamable action, DocWriteResponse.Result result, Map updatedSourceAsMap, + public Result(Writeable action, DocWriteResponse.Result result, Map updatedSourceAsMap, XContentType updateSourceContentType) { this.action = action; this.result = result; @@ -325,7 +325,7 @@ public Result(Streamable action, DocWriteResponse.Result result, Map T action() { + public T action() { return (T) action; } diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index aa9121a040ecd..97057eedd60d2 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -125,8 +125,31 @@ public class UpdateRequest extends InstanceShardOperationRequest @Nullable private IndexRequest doc; - public UpdateRequest() { + public UpdateRequest() {} + public UpdateRequest(StreamInput in) throws IOException { + super(in); + waitForActiveShards = ActiveShardCount.readFrom(in); + type = in.readString(); + id = in.readString(); + routing = in.readOptionalString(); + if (in.readBoolean()) { + script = new Script(in); + } + retryOnConflict = in.readVInt(); + refreshPolicy = RefreshPolicy.readFrom(in); + if (in.readBoolean()) { + doc = new IndexRequest(in); + } + fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new); + if (in.readBoolean()) { + upsertRequest = new IndexRequest(in); + } + docAsUpsert = in.readBoolean(); + ifSeqNo = in.readZLong(); + ifPrimaryTerm = in.readVLong(); + detectNoop = in.readBoolean(); + scriptedUpsert = in.readBoolean(); } public UpdateRequest(String index, String id) { @@ 
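// UpdateRequest's stream constructor mixes two optional encodings that are
// identical on the wire, one presence boolean followed by the payload: an
// explicit readBoolean guard where the writeTo side writes the boolean by
// hand, and readOptionalWriteable where writeTo uses writeOptionalWriteable.
// Side by side:

if (in.readBoolean()) {            // pairs with a hand-written writeBoolean(doc != null) in writeTo
    doc = new IndexRequest(in);
}
fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new); // pairs with writeOptionalWriteable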
-828,32 +851,6 @@ public UpdateRequest scriptedUpsert(boolean scriptedUpsert) { return this; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - waitForActiveShards = ActiveShardCount.readFrom(in); - type = in.readString(); - id = in.readString(); - routing = in.readOptionalString(); - if (in.readBoolean()) { - script = new Script(in); - } - retryOnConflict = in.readVInt(); - refreshPolicy = RefreshPolicy.readFrom(in); - if (in.readBoolean()) { - doc = new IndexRequest(in); - } - fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new); - if (in.readBoolean()) { - upsertRequest = new IndexRequest(in); - } - docAsUpsert = in.readBoolean(); - ifSeqNo = in.readZLong(); - ifPrimaryTerm = in.readVLong(); - detectNoop = in.readBoolean(); - scriptedUpsert = in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateResponse.java b/server/src/main/java/org/elasticsearch/action/update/UpdateResponse.java index f3afec4f25b29..05aaf53ed400b 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateResponse.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateResponse.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.get.GetResult; -import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; @@ -39,15 +38,19 @@ public class UpdateResponse extends DocWriteResponse { private GetResult getResult; - public UpdateResponse() { + public UpdateResponse(StreamInput in) throws IOException { + super(in); + if (in.readBoolean()) { + getResult = new GetResult(in); + } } /** * Constructor to be used when a update didn't translate in a write. * For example: update script with operation set to none */ - public UpdateResponse(ShardId shardId, String type, String id, long version, Result result) { - this(new ShardInfo(0, 0), shardId, type, id, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, version, result); + public UpdateResponse(ShardId shardId, String type, String id, long seqNo, long primaryTerm, long version, Result result) { + this(new ShardInfo(0, 0), shardId, type, id, seqNo, primaryTerm, version, result); } public UpdateResponse( @@ -69,14 +72,6 @@ public RestStatus status() { return this.result == Result.CREATED ? 
RestStatus.CREATED : super.status(); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - if (in.readBoolean()) { - getResult = GetResult.readGetResult(in); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -156,10 +151,10 @@ public void setGetResult(GetResult getResult) { @Override public UpdateResponse build() { UpdateResponse update; - if (shardInfo != null && seqNo != null) { + if (shardInfo != null) { update = new UpdateResponse(shardInfo, shardId, type, id, seqNo, primaryTerm, version, result); } else { - update = new UpdateResponse(shardId, type, id, version, result); + update = new UpdateResponse(shardId, type, id, seqNo, primaryTerm, version, result); } if (getResult != null) { update.setGetResult(new GetResult(update.getIndex(), update.getType(), update.getId(), diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterName.java b/server/src/main/java/org/elasticsearch/cluster/ClusterName.java index ab2efc6061e4e..cda6b536136d7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterName.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterName.java @@ -27,6 +27,7 @@ import java.io.IOException; import java.util.Objects; +import java.util.function.Predicate; public class ClusterName implements Writeable { @@ -81,4 +82,18 @@ public int hashCode() { public String toString() { return "Cluster [" + value + "]"; } + + public Predicate getEqualityPredicate() { + return new Predicate() { + @Override + public boolean test(ClusterName o) { + return ClusterName.this.equals(o); + } + + @Override + public String toString() { + return "local cluster name [" + ClusterName.this.value() + "]"; + } + }; + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java b/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java index f48413824d31f..b004af9d38ab6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java @@ -297,14 +297,27 @@ private class ConnectionTarget { private final AtomicInteger consecutiveFailureCount = new AtomicInteger(); - private final Runnable connectActivity = () -> threadPool.executor(ThreadPool.Names.MANAGEMENT).execute(new AbstractRunnable() { + private final Runnable connectActivity = new AbstractRunnable() { + + final AbstractRunnable abstractRunnable = this; + @Override protected void doRun() { assert Thread.holdsLock(mutex) == false : "mutex unexpectedly held"; - transportService.connectToNode(discoveryNode); - consecutiveFailureCount.set(0); - logger.debug("connected to {}", discoveryNode); - onCompletion(ActivityType.CONNECTING, null, disconnectActivity); + transportService.connectToNode(discoveryNode, new ActionListener() { + @Override + public void onResponse(Void aVoid) { + assert Thread.holdsLock(mutex) == false : "mutex unexpectedly held"; + consecutiveFailureCount.set(0); + logger.debug("connected to {}", discoveryNode); + onCompletion(ActivityType.CONNECTING, null, disconnectActivity); + } + + @Override + public void onFailure(Exception e) { + abstractRunnable.onFailure(e); + } + }); } @Override @@ -322,7 +335,7 @@ public void onFailure(Exception e) { public String toString() { return "connect to " + discoveryNode; } - }); + }; private final Runnable disconnectActivity = new AbstractRunnable() { @Override diff --git 
a/server/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java b/server/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java index 6b35a5bd307a0..0e9ef56fc9872 100644 --- a/server/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java @@ -449,9 +449,6 @@ public RestoreInProgress(StreamInput in) throws IOException { this.entries = entriesBuilder.build(); } - /** - * {@inheritDoc} - */ @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(entries.size()); @@ -472,14 +469,11 @@ public void writeTo(StreamOutput out) throws IOException { } } - /** - * {@inheritDoc} - */ @Override public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { builder.startArray("snapshots"); for (ObjectCursor entry : entries.values()) { - toXContent(entry.value, builder, params); + toXContent(entry.value, builder); } builder.endArray(); return builder; @@ -490,9 +484,8 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par * * @param entry restore operation metadata * @param builder XContent builder - * @param params serialization parameters */ - public void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException { + public void toXContent(Entry entry, XContentBuilder builder) throws IOException { builder.startObject(); builder.field("snapshot", entry.snapshot().getSnapshotId().getName()); builder.field("repository", entry.snapshot().getRepository()); diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java index 0134b798c72fd..8e702fbdceea8 100644 --- a/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java @@ -44,10 +44,6 @@ public class SnapshotDeletionsInProgress extends AbstractNamedDiffable i // the list of snapshot deletion request entries private final List entries; - public SnapshotDeletionsInProgress() { - this(Collections.emptyList()); - } - private SnapshotDeletionsInProgress(List entries) { this.entries = Collections.unmodifiableList(entries); } diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java index 039c7ec447767..3ce8615c47630 100644 --- a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java @@ -315,24 +315,21 @@ public String toString() { } public enum State { - INIT((byte) 0, false, false), - STARTED((byte) 1, false, false), - SUCCESS((byte) 2, true, false), - FAILED((byte) 3, true, true), - ABORTED((byte) 4, false, true), - MISSING((byte) 5, true, true), - WAITING((byte) 6, false, false); + INIT((byte) 0, false), + STARTED((byte) 1, false), + SUCCESS((byte) 2, true), + FAILED((byte) 3, true), + ABORTED((byte) 4, false), + MISSING((byte) 5, true), + WAITING((byte) 6, false); private final byte value; private final boolean completed; - private final boolean failed; - - State(byte value, boolean completed, boolean failed) { + State(byte value, boolean completed) { this.value = value; this.completed = completed; - this.failed = failed; } public byte value() { @@ -343,10 +340,6 @@ public boolean completed() { return completed; } - 
public boolean failed() { - return failed; - } - public static State fromValue(byte value) { switch (value) { case 0: diff --git a/server/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java b/server/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java index 5b3650ec43f37..5b377989c6530 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java @@ -54,7 +54,7 @@ public NodeMappingRefreshAction(TransportService transportService, MetaDataMappi this.transportService = transportService; this.metaDataMappingService = metaDataMappingService; transportService.registerRequestHandler(ACTION_NAME, - NodeMappingRefreshRequest::new, ThreadPool.Names.SAME, new NodeMappingRefreshTransportHandler()); + ThreadPool.Names.SAME, NodeMappingRefreshRequest::new, new NodeMappingRefreshTransportHandler()); } public void nodeMappingRefresh(final DiscoveryNode masterNode, final NodeMappingRefreshRequest request) { @@ -80,7 +80,11 @@ public static class NodeMappingRefreshRequest extends TransportRequest implement private String indexUUID = IndexMetaData.INDEX_UUID_NA_VALUE; private String nodeId; - public NodeMappingRefreshRequest() { + public NodeMappingRefreshRequest(StreamInput in) throws IOException { + super(in); + index = in.readString(); + nodeId = in.readString(); + indexUUID = in.readString(); } public NodeMappingRefreshRequest(String index, String indexUUID, String nodeId) { @@ -118,13 +122,5 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(nodeId); out.writeString(indexUUID); } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - index = in.readString(); - nodeId = in.readString(); - indexUUID = in.readString(); - } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index d28cf40026d6c..59e908179fbe9 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -95,10 +95,13 @@ public ShardStateAction(ClusterService clusterService, TransportService transpor this.threadPool = threadPool; transportService.registerRequestHandler(SHARD_STARTED_ACTION_NAME, ThreadPool.Names.SAME, StartedShardEntry::new, - new ShardStartedTransportHandler(clusterService, new ShardStartedClusterStateTaskExecutor(allocationService, logger), logger)); + new ShardStartedTransportHandler(clusterService, + new ShardStartedClusterStateTaskExecutor(allocationService, rerouteService, logger), + logger)); transportService.registerRequestHandler(SHARD_FAILED_ACTION_NAME, ThreadPool.Names.SAME, FailedShardEntry::new, new ShardFailedTransportHandler(clusterService, - new ShardFailedClusterStateTaskExecutor(allocationService, rerouteService, logger), logger)); + new ShardFailedClusterStateTaskExecutor(allocationService, rerouteService, logger), + logger)); } private void sendShardAction(final String actionName, final ClusterState currentState, @@ -375,11 +378,12 @@ ClusterState applyFailedShards(ClusterState currentState, List fail public void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) { int numberOfUnassignedShards = clusterChangedEvent.state().getRoutingNodes().unassigned().size(); if 
(numberOfUnassignedShards > 0) { - String reason = String.format(Locale.ROOT, "[%d] unassigned shards after failing shards", numberOfUnassignedShards); - if (logger.isTraceEnabled()) { - logger.trace("{}, scheduling a reroute", reason); - } - rerouteService.reroute(reason, ActionListener.wrap( + // The reroute called after failing some shards will not assign any shard back to the node on which it failed. If there were + // no other options for a failed shard then it is left unassigned. However, absent other options it's better to try and + // assign it again, even if that means putting it back on the node on which it previously failed: + final String reason = String.format(Locale.ROOT, "[%d] unassigned shards after failing shards", numberOfUnassignedShards); + logger.trace("{}, scheduling a reroute", reason); + rerouteService.reroute(reason, Priority.NORMAL, ActionListener.wrap( r -> logger.trace("{}, reroute completed", reason), e -> logger.debug(new ParameterizedMessage("{}, reroute failed", reason), e))); } @@ -510,10 +514,12 @@ public static class ShardStartedClusterStateTaskExecutor implements ClusterStateTaskExecutor, ClusterStateTaskListener { private final AllocationService allocationService; private final Logger logger; + private final RerouteService rerouteService; - public ShardStartedClusterStateTaskExecutor(AllocationService allocationService, Logger logger) { + public ShardStartedClusterStateTaskExecutor(AllocationService allocationService, RerouteService rerouteService, Logger logger) { this.allocationService = allocationService; this.logger = logger; + this.rerouteService = rerouteService; } @Override @@ -588,6 +594,13 @@ public void onFailure(String source, Exception e) { logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); } } + + @Override + public void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) { + rerouteService.reroute("reroute after starting shards", Priority.NORMAL, ActionListener.wrap( + r -> logger.trace("reroute after starting shards succeeded"), + e -> logger.debug("reroute after starting shards failed", e))); + } } public static class StartedShardEntry extends TransportRequest { diff --git a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java index 5f8f255d5f84c..66ff5099e0b96 100644 --- a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java +++ b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java @@ -22,7 +22,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -34,7 +33,7 @@ import java.util.Locale; import java.util.Objects; -public class ClusterBlock implements Streamable, Writeable, ToXContentFragment { +public class ClusterBlock implements Writeable, ToXContentFragment { private int id; private @Nullable String uuid; @@ -142,11 +141,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void 
writeTo(StreamOutput out) throws IOException { out.writeVInt(id); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationMetaData.java b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationMetaData.java index 244ed105257bd..cfd4456062e35 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationMetaData.java @@ -340,7 +340,7 @@ public VotingConfiguration(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeStringArray(nodeIds.toArray(new String[nodeIds.size()])); + out.writeStringArray(nodeIds.toArray(new String[0])); } public boolean hasQuorum(Collection votes) { diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java index 7aad43aaab288..86e0837f1a94d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java @@ -503,7 +503,7 @@ public static class VoteCollection { private final Set joins; public boolean addVote(DiscoveryNode sourceNode) { - return nodes.put(sourceNode.getId(), sourceNode) == null; + return sourceNode.isMasterNode() && nodes.put(sourceNode.getId(), sourceNode) == null; } public boolean addJoinVote(Join join) { diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 2fceb76ccc1f4..ee40517f59f73 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -851,11 +851,23 @@ assert localNodeMayWinElection(getLastAcceptedState()) : ClusterState improveConfiguration(ClusterState clusterState) { assert Thread.holdsLock(mutex) : "Coordinator mutex not held"; + // exclude any nodes whose ID is in the voting config exclusions list ... + final Stream excludedNodeIds = clusterState.getVotingConfigExclusions().stream().map(VotingConfigExclusion::getNodeId); + // ... and also automatically exclude the node IDs of master-ineligible nodes that were previously master-eligible and are still in + // the voting config. We could exclude all the master-ineligible nodes here, but there could be quite a few of them and that makes + // the logging much harder to follow. 
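Note: the comment above describes the exclusion set that the lines which follow go on to compute: the union of the explicit voting-config exclusions and any master-ineligible nodes still present in the last accepted or committed configuration. A standalone sketch of that set computation (names and types are illustrative, not the real `Coordinator` API):

```java
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;

class ReconfigureExclusionsSketch {
    static Set<String> excludedNodeIds(Set<String> explicitExclusions,
                                       Map<String, Boolean> masterEligibleById,
                                       Set<String> votingConfigIds) {
        Stream<String> ineligibleButStillInConfig = masterEligibleById.entrySet().stream()
            .filter(e -> e.getValue() == false)      // master-ineligible ...
            .map(Map.Entry::getKey)
            .filter(votingConfigIds::contains);      // ... yet still in the voting config
        return Stream.concat(ineligibleButStillInConfig, explicitExclusions.stream())
            .collect(Collectors.toSet());
    }
}
```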
+ final Stream masterIneligibleNodeIdsInVotingConfig = StreamSupport.stream(clusterState.nodes().spliterator(), false) + .filter(n -> n.isMasterNode() == false + && (clusterState.getLastAcceptedConfiguration().getNodeIds().contains(n.getId()) + || clusterState.getLastCommittedConfiguration().getNodeIds().contains(n.getId()))) + .map(DiscoveryNode::getId); + final Set liveNodes = StreamSupport.stream(clusterState.nodes().spliterator(), false) - .filter(this::hasJoinVoteFrom).collect(Collectors.toSet()); + .filter(DiscoveryNode::isMasterNode).filter(coordinationState.get()::containsJoinVoteFor).collect(Collectors.toSet()); final VotingConfiguration newConfig = reconfigurator.reconfigure(liveNodes, - clusterState.getVotingConfigExclusions().stream().map(VotingConfigExclusion::getNodeId).collect(Collectors.toSet()), + Stream.concat(masterIneligibleNodeIdsInVotingConfig, excludedNodeIds).collect(Collectors.toSet()), getLocalNode(), clusterState.getLastAcceptedConfiguration()); + if (newConfig.equals(clusterState.getLastAcceptedConfiguration()) == false) { assert coordinationState.get().joinVotesHaveQuorumFor(newConfig); return ClusterState.builder(clusterState).metaData(MetaData.builder(clusterState.metaData()) @@ -893,9 +905,9 @@ public void onFailure(String source, Exception e) { } } - // for tests - boolean hasJoinVoteFrom(DiscoveryNode node) { - return coordinationState.get().containsJoinVoteFor(node); + // exposed for tests + boolean missingJoinVoteFrom(DiscoveryNode node) { + return node.isMasterNode() && coordinationState.get().containsJoinVoteFor(node) == false; } private void handleJoin(Join join) { @@ -904,13 +916,13 @@ private void handleJoin(Join join) { if (coordinationState.get().electionWon()) { // If we have already won the election then the actual join does not matter for election purposes, so swallow any exception - final boolean isNewJoin = handleJoinIgnoringExceptions(join); + final boolean isNewJoinFromMasterEligibleNode = handleJoinIgnoringExceptions(join); // If we haven't completely finished becoming master then there's already a publication scheduled which will, in turn, // schedule a reconfiguration if needed. It's benign to schedule a reconfiguration anyway, but it might fail if it wins the // race against the election-winning publication and log a big error message, which we can prevent by checking this here: final boolean establishedAsMaster = mode == Mode.LEADER && getLastAcceptedState().term() == getCurrentTerm(); - if (isNewJoin && establishedAsMaster && publicationInProgress() == false) { + if (isNewJoinFromMasterEligibleNode && establishedAsMaster && publicationInProgress() == false) { scheduleReconfigurationIfNeeded(); } } else { @@ -1349,7 +1361,7 @@ public void onFailure(Exception e) { } private void handleAssociatedJoin(Join join) { - if (join.getTerm() == getCurrentTerm() && hasJoinVoteFrom(join.getSourceNode()) == false) { + if (join.getTerm() == getCurrentTerm() && missingJoinVoteFrom(join.getSourceNode())) { logger.trace("handling {}", join); handleJoin(join); } @@ -1387,7 +1399,7 @@ protected void onMissingJoin(DiscoveryNode discoveryNode) { // The remote node did not include a join vote in its publish response. We do not persist joins, so it could be that the remote // node voted for us and then rebooted, or it could be that it voted for a different node in this term. If we don't have a copy // of a join from this node then we assume the latter and bump our term to obtain a vote from this node. 
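Note: since `VoteCollection#addVote` (earlier in this diff) now silently drops votes from master-ineligible nodes, the old `hasJoinVoteFrom(node) == false` check would report a missing vote for every master-ineligible node and could bump terms needlessly; the renamed `missingJoinVoteFrom` therefore checks eligibility first. A minimal sketch of the paired predicates, using simplified stand-ins rather than the real classes:

```java
import java.util.HashMap;
import java.util.Map;

class JoinVoteSketch {
    private final Map<String, Boolean> votes = new HashMap<>();

    // mirrors VoteCollection#addVote after this change: votes from
    // master-ineligible nodes are never recorded at all
    boolean addVote(String nodeId, boolean masterEligible) {
        return masterEligible && votes.put(nodeId, Boolean.TRUE) == null;
    }

    // mirrors Coordinator#missingJoinVoteFrom: only a master-eligible node can
    // be "missing", since only its vote could ever have been counted
    boolean missingJoinVoteFrom(String nodeId, boolean masterEligible) {
        return masterEligible && votes.containsKey(nodeId) == false;
    }
}
```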
- if (hasJoinVoteFrom(discoveryNode) == false) { + if (missingJoinVoteFrom(discoveryNode)) { final long term = publishRequest.getAcceptedState().term(); logger.debug("onMissingJoin: no join vote from {}, bumping term to exceed {}", discoveryNode, term); updateMaxTermSeen(term + 1); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java b/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java index 4a9be561ae96d..38ac7a32bdba3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java @@ -242,11 +242,9 @@ Set getKnownFollowers() { } private void handleDisconnectedNode(DiscoveryNode discoveryNode) { - synchronized (mutex) { - FollowerChecker followerChecker = followerCheckers.get(discoveryNode); - if (followerChecker != null && followerChecker.running()) { - followerChecker.failNode("disconnected"); - } + FollowerChecker followerChecker = followerCheckers.get(discoveryNode); + if (followerChecker != null) { + followerChecker.failNode("disconnected"); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java index 5d4f847bc7c26..3162d6a5feb08 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java @@ -131,7 +131,7 @@ public ClusterTasksResult execute(ClusterState currentSta }); transportService.registerRequestHandler(VALIDATE_JOIN_ACTION_NAME, - ValidateJoinRequest::new, ThreadPool.Names.GENERIC, + ThreadPool.Names.GENERIC, ValidateJoinRequest::new, (request, channel, task) -> { final ClusterState localState = currentStateSupplier.get(); if (localState.metaData().clusterUUIDCommitted() && diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java index c5ed468993ff7..e3cf0cb2558b2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java @@ -31,6 +31,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RerouteService; import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.common.Priority; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import java.util.ArrayList; @@ -150,7 +151,7 @@ public ClusterTasksResult execute(ClusterState currentState, List jo results.success(joinTask); } if (nodesChanged) { - rerouteService.reroute("post-join reroute", ActionListener.wrap( + rerouteService.reroute("post-join reroute", Priority.HIGH, ActionListener.wrap( r -> logger.trace("post-join reroute completed"), e -> logger.debug("post-join reroute failed", e))); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java index da7c1d02a1e0b..2557328233eba 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java @@ -239,12 +239,18 @@ void handlePublishResponse(PublishResponse publishResponse) { if (applyCommitRequest.isPresent()) { 
sendApplyCommit(); } else { - Publication.this.handlePublishResponse(discoveryNode, publishResponse).ifPresent(applyCommit -> { - assert applyCommitRequest.isPresent() == false; - applyCommitRequest = Optional.of(applyCommit); - ackListener.onCommit(TimeValue.timeValueMillis(currentTimeSupplier.getAsLong() - startTime)); - publicationTargets.stream().filter(PublicationTarget::isWaitingForQuorum).forEach(PublicationTarget::sendApplyCommit); - }); + try { + Publication.this.handlePublishResponse(discoveryNode, publishResponse).ifPresent(applyCommit -> { + assert applyCommitRequest.isPresent() == false; + applyCommitRequest = Optional.of(applyCommit); + ackListener.onCommit(TimeValue.timeValueMillis(currentTimeSupplier.getAsLong() - startTime)); + publicationTargets.stream().filter(PublicationTarget::isWaitingForQuorum) + .forEach(PublicationTarget::sendApplyCommit); + }); + } catch (Exception e) { + setFailed(e); + onPossibleCommitFailure(); + } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java b/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java index 890877de1939b..4b451df2814fe 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java @@ -92,8 +92,8 @@ public PublicationTransportHandler(TransportService transportService, NamedWrite this.namedWriteableRegistry = namedWriteableRegistry; this.handlePublishRequest = handlePublishRequest; - transportService.registerRequestHandler(PUBLISH_STATE_ACTION_NAME, BytesTransportRequest::new, ThreadPool.Names.GENERIC, - false, false, (request, channel, task) -> channel.sendResponse(handleIncomingPublishRequest(request))); + transportService.registerRequestHandler(PUBLISH_STATE_ACTION_NAME, ThreadPool.Names.GENERIC, false, false, + BytesTransportRequest::new, (request, channel, task) -> channel.sendResponse(handleIncomingPublishRequest(request))); transportService.registerRequestHandler(COMMIT_STATE_ACTION_NAME, ThreadPool.Names.GENERIC, false, false, ApplyCommitRequest::new, diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java index dec4a13c67d1e..4028d8c166560 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java @@ -28,18 +28,15 @@ public class ValidateJoinRequest extends TransportRequest { private ClusterState state; - public ValidateJoinRequest() {} + public ValidateJoinRequest(StreamInput in) throws IOException { + super(in); + this.state = ClusterState.readFrom(in, null); + } public ValidateJoinRequest(ClusterState state) { this.state = state; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - this.state = ClusterState.readFrom(in, null); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index ee3c2370b46f2..154b2ac23c216 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -573,7 +573,7 @@ boolean isPatternMatchingAllIndices(MetaData metaData, String[] indicesOrAliases return false; } - static final class Context { + public static class Context { private final ClusterState state; private final IndicesOptions options; @@ -593,7 +593,8 @@ static final class Context { this(state, options, startTime, false, false); } - Context(ClusterState state, IndicesOptions options, long startTime, boolean preserveAliases, boolean resolveToWriteIndex) { + protected Context(ClusterState state, IndicesOptions options, long startTime, + boolean preserveAliases, boolean resolveToWriteIndex) { this.state = state; this.options = options; this.startTime = startTime; @@ -851,7 +852,7 @@ private static List resolveEmptyOrTrivialWildcard(IndicesOptions options } } - static final class DateMathExpressionResolver implements ExpressionResolver { + public static final class DateMathExpressionResolver implements ExpressionResolver { private static final DateFormatter DEFAULT_DATE_FORMATTER = DateFormatter.forPattern("uuuu.MM.dd"); private static final String EXPRESSION_LEFT_BOUND = "<"; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java index 5becb69d615d3..5bab77e49fa37 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java @@ -35,6 +35,7 @@ import org.elasticsearch.action.admin.indices.open.OpenIndexClusterStateUpdateRequest; import org.elasticsearch.action.support.ActiveShardsObserver; import org.elasticsearch.action.support.replication.ReplicationResponse; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; @@ -107,19 +108,18 @@ public class MetaDataIndexStateService { private final MetaDataIndexUpgradeService metaDataIndexUpgradeService; private final IndicesService indicesService; private final ThreadPool threadPool; - private final TransportVerifyShardBeforeCloseAction transportVerifyShardBeforeCloseAction; + private final NodeClient client; private final ActiveShardsObserver activeShardsObserver; @Inject public MetaDataIndexStateService(ClusterService clusterService, AllocationService allocationService, MetaDataIndexUpgradeService metaDataIndexUpgradeService, - IndicesService indicesService, ThreadPool threadPool, - TransportVerifyShardBeforeCloseAction transportVerifyShardBeforeCloseAction) { + IndicesService indicesService, ThreadPool threadPool, NodeClient client) { this.indicesService = indicesService; this.clusterService = clusterService; this.allocationService = allocationService; this.threadPool = threadPool; - this.transportVerifyShardBeforeCloseAction = transportVerifyShardBeforeCloseAction; + this.client = client; this.metaDataIndexUpgradeService = metaDataIndexUpgradeService; this.activeShardsObserver = new ActiveShardsObserver(clusterService, threadPool); } @@ -401,7 +401,7 @@ private void sendVerifyShardBeforeCloseRequest(final IndexShardRoutingTable shar if (request.ackTimeout() != null) { shardRequest.timeout(request.ackTimeout()); } - transportVerifyShardBeforeCloseAction.execute(shardRequest, new ActionListener<>() { + 
client.executeLocally(TransportVerifyShardBeforeCloseAction.TYPE, shardRequest, new ActionListener<>() { @Override public void onResponse(ReplicationResponse replicationResponse) { final TransportVerifyShardBeforeCloseAction.ShardRequest shardRequest = @@ -409,7 +409,7 @@ public void onResponse(ReplicationResponse replicationResponse) { if (request.ackTimeout() != null) { shardRequest.timeout(request.ackTimeout()); } - transportVerifyShardBeforeCloseAction.execute(shardRequest, listener); + client.executeLocally(TransportVerifyShardBeforeCloseAction.TYPE, shardRequest, listener); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java index 72fdea8e6092d..2918947fa1fd4 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java @@ -115,18 +115,15 @@ boolean isUpgraded(IndexMetaData indexMetaData) { } /** - * Elasticsearch v6.0 no longer supports indices created pre v5.0. All indices - * that were created before Elasticsearch v5.0 should be re-indexed in Elasticsearch 5.x - * before they can be opened by this version of elasticsearch. + * Elasticsearch does not support indices created before the previous major version. They must be reindexed using an earlier version + * before they can be opened here. */ private void checkSupportedVersion(IndexMetaData indexMetaData, Version minimumIndexCompatibilityVersion) { - if (indexMetaData.getState() == IndexMetaData.State.OPEN && isSupportedVersion(indexMetaData, - minimumIndexCompatibilityVersion) == false) { - throw new IllegalStateException("The index [" + indexMetaData.getIndex() + "] was created with version [" + if (isSupportedVersion(indexMetaData, minimumIndexCompatibilityVersion) == false) { + throw new IllegalStateException("The index " + indexMetaData.getIndex() + " was created with version [" + indexMetaData.getCreationVersion() + "] but the minimum compatible version is [" - - + minimumIndexCompatibilityVersion + "]. It should be re-indexed in Elasticsearch " + minimumIndexCompatibilityVersion.major - + ".x before upgrading to " + Version.CURRENT + "."); + + minimumIndexCompatibilityVersion + "]. 
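Note: `MetaDataIndexStateService` above now depends on a `NodeClient` plus the action's `TYPE` key and dispatches via `executeLocally`, instead of having the concrete `TransportVerifyShardBeforeCloseAction` injected into its constructor. A simplified sketch of the shape of that decoupling; all types and the action name here are illustrative stand-ins, not the real API:

```java
import java.util.function.Consumer;

// Stand-in for ActionType: a typed dispatch key, not the action implementation.
final class ActionKey<Request, Response> {
    final String name;
    ActionKey(String name) { this.name = name; }
}

// Stand-in for NodeClient#executeLocally.
interface LocalClient {
    <Request, Response> void executeLocally(ActionKey<Request, Response> action,
                                            Request request,
                                            Consumer<Response> listener);
}

class CloseVerifierSketch {
    // hypothetical key standing in for TransportVerifyShardBeforeCloseAction.TYPE
    static final ActionKey<String, String> VERIFY_BEFORE_CLOSE =
        new ActionKey<>("indices:admin/close/verify");

    private final LocalClient client;

    CloseVerifierSketch(LocalClient client) {
        this.client = client; // no construction-time reference to the transport action
    }

    void verify(String shardRequest, Consumer<String> listener) {
        client.executeLocally(VERIFY_BEFORE_CLOSE, shardRequest, listener);
    }
}
```

Depending on the client rather than the concrete action avoids a constructor-injection cycle between the cluster-state service and the transport layer.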
It should be re-indexed in Elasticsearch " + + minimumIndexCompatibilityVersion.major + ".x before upgrading to " + Version.CURRENT + "."); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/BatchedRerouteService.java b/server/src/main/java/org/elasticsearch/cluster/routing/BatchedRerouteService.java index 4ed4caadabe49..0e387db5f45ef 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/BatchedRerouteService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/BatchedRerouteService.java @@ -24,7 +24,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.PlainListenableActionFuture; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.NotMasterException; @@ -32,6 +31,8 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; +import java.util.ArrayList; +import java.util.List; import java.util.function.BiFunction; /** @@ -49,7 +50,8 @@ public class BatchedRerouteService implements RerouteService { private final Object mutex = new Object(); @Nullable // null if no reroute is currently pending - private PlainListenableActionFuture pendingRerouteListeners; + private List> pendingRerouteListeners; + private Priority pendingTaskPriority = Priority.LANGUID; /** * @param reroute Function that computes the updated cluster state after it has been rerouted. @@ -63,29 +65,55 @@ public BatchedRerouteService(ClusterService clusterService, BiFunction listener) { - final PlainListenableActionFuture currentListeners; + public final void reroute(String reason, Priority priority, ActionListener listener) { + final List> currentListeners; synchronized (mutex) { if (pendingRerouteListeners != null) { - logger.trace("already has pending reroute, adding [{}] to batch", reason); - pendingRerouteListeners.addListener(listener); - return; + if (priority.sameOrAfter(pendingTaskPriority)) { + logger.trace("already has pending reroute at priority [{}], adding [{}] with priority [{}] to batch", + pendingTaskPriority, reason, priority); + pendingRerouteListeners.add(listener); + return; + } else { + logger.trace("already has pending reroute at priority [{}], promoting batch to [{}] and adding [{}]", + pendingTaskPriority, priority, reason); + currentListeners = new ArrayList<>(1 + pendingRerouteListeners.size()); + currentListeners.add(listener); + currentListeners.addAll(pendingRerouteListeners); + pendingRerouteListeners.clear(); + pendingRerouteListeners = currentListeners; + pendingTaskPriority = priority; + } + } else { + logger.trace("no pending reroute, scheduling reroute [{}] at priority [{}]", reason, priority); + currentListeners = new ArrayList<>(1); + currentListeners.add(listener); + pendingRerouteListeners = currentListeners; + pendingTaskPriority = priority; } - currentListeners = PlainListenableActionFuture.newListenableFuture(); - currentListeners.addListener(listener); - pendingRerouteListeners = currentListeners; } - logger.trace("rerouting [{}]", reason); try { clusterService.submitStateUpdateTask(CLUSTER_UPDATE_TASK_SOURCE + "(" + reason + ")", - new ClusterStateUpdateTask(Priority.HIGH) { + new ClusterStateUpdateTask(priority) { + @Override public ClusterState execute(ClusterState currentState) { + final boolean currentListenersArePending; synchronized (mutex) { - assert 
pendingRerouteListeners == currentListeners; - pendingRerouteListeners = null; + assert currentListeners.isEmpty() == (pendingRerouteListeners != currentListeners) + : "currentListeners=" + currentListeners + ", pendingRerouteListeners=" + pendingRerouteListeners; + currentListenersArePending = pendingRerouteListeners == currentListeners; + if (currentListenersArePending) { + pendingRerouteListeners = null; + } + } + if (currentListenersArePending) { + logger.trace("performing batched reroute [{}]", reason); + return reroute.apply(currentState, reason); + } else { + logger.trace("batched reroute [{}] was promoted", reason); + return currentState; } - return reroute.apply(currentState, reason); } @Override @@ -95,7 +123,7 @@ public void onNoLongerMaster(String source) { pendingRerouteListeners = null; } } - currentListeners.onFailure(new NotMasterException("delayed reroute [" + reason + "] cancelled")); + ActionListener.onFailure(currentListeners, new NotMasterException("delayed reroute [" + reason + "] cancelled")); // no big deal, the new master will reroute again } @@ -114,22 +142,26 @@ public void onFailure(String source, Exception e) { logger.error(() -> new ParameterizedMessage("unexpected failure during [{}], current state version [{}]", source, state.version()), e); } - currentListeners.onFailure(new ElasticsearchException("delayed reroute [" + reason + "] failed", e)); + ActionListener.onFailure(currentListeners, + new ElasticsearchException("delayed reroute [" + reason + "] failed", e)); } @Override public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - currentListeners.onResponse(null); + ActionListener.onResponse(currentListeners, null); } }); } catch (Exception e) { synchronized (mutex) { - assert pendingRerouteListeners == currentListeners; - pendingRerouteListeners = null; + assert currentListeners.isEmpty() == (pendingRerouteListeners != currentListeners); + if (pendingRerouteListeners == currentListeners) { + pendingRerouteListeners = null; + } } ClusterState state = clusterService.state(); logger.warn(() -> new ParameterizedMessage("failed to reroute routing table, current state:\n{}", state), e); - currentListeners.onFailure(new ElasticsearchException("delayed reroute [" + reason + "] could not be submitted", e)); + ActionListener.onFailure(currentListeners, + new ElasticsearchException("delayed reroute [" + reason + "] could not be submitted", e)); } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RerouteService.java b/server/src/main/java/org/elasticsearch/cluster/routing/RerouteService.java index 11a49322e10fa..58f9e41fe88a7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RerouteService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RerouteService.java @@ -19,11 +19,19 @@ package org.elasticsearch.cluster.routing; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.Priority; /** * Asynchronously performs a cluster reroute, updating any shard states and rebalancing the cluster if appropriate. */ @FunctionalInterface public interface RerouteService { - void reroute(String reason, ActionListener listener); + + /** + * Schedule a cluster reroute. + * @param priority the (minimum) priority at which to run this reroute. 
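Note: a sketch of the batching-and-promotion scheme implemented in `BatchedRerouteService` above and documented in the `RerouteService` javadoc that follows: requests at the same or lower priority join the pending batch, while a higher-priority request adopts the batch and resubmits it, leaving the superseded task to no-op when it finds it no longer owns the listener list. This is a minimal stand-alone model, not the real service:

```java
import java.util.ArrayList;
import java.util.List;

class PromotableBatchSketch {
    private final Object mutex = new Object();
    private List<Runnable> pending;                   // null when nothing is scheduled
    private int pendingPriority = Integer.MIN_VALUE;  // stand-in for Priority

    void submit(int priority, Runnable listener) {
        final List<Runnable> mine;
        synchronized (mutex) {
            if (pending != null && priority <= pendingPriority) {
                pending.add(listener);                // batched with the pending run
                return;
            }
            mine = new ArrayList<>();
            mine.add(listener);
            if (pending != null) {
                mine.addAll(pending);                 // promotion: adopt the old batch
            }
            pending = mine;
            pendingPriority = priority;
        }
        schedule(priority, () -> {
            final boolean owns;
            synchronized (mutex) {
                owns = pending == mine;
                if (owns) {
                    pending = null;
                }
            }
            if (owns) {
                mine.forEach(Runnable::run);          // notify the whole batch
            }                                         // else: promoted away; no-op
        });
    }

    void schedule(int priority, Runnable task) {
        new Thread(task).start();                     // stand-in for submitStateUpdateTask
    }
}
```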
If there is already a pending reroute at a higher priority then + * this reroute is batched with the pending one; if there is already a pending reroute at a lower priority then + * the priority of the pending batch is raised to the given priority. + */ + void reroute(String reason, Priority priority, ActionListener listener); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index 9b22e63cf9ae5..eb139589724ae 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -109,9 +109,10 @@ public ClusterState applyStartedShards(ClusterState clusterState, List s.shardId().toString()); - return buildResultAndLogHealthChange(clusterState, allocation, "shards started [" + startedShardsAsString + "] ..."); + assert RoutingNodes.assertShardStats(allocation.routingNodes()); + String startedShardsAsString + = firstListElementsToCommaDelimitedString(startedShards, s -> s.shardId().toString(), logger.isDebugEnabled()); + return buildResultAndLogHealthChange(clusterState, allocation, "shards started [" + startedShardsAsString + "]"); } protected ClusterState buildResultAndLogHealthChange(ClusterState oldState, RoutingAllocation allocation, String reason) { @@ -208,8 +209,9 @@ public ClusterState applyFailedShards(final ClusterState clusterState, final Lis gatewayAllocator.applyFailedShards(allocation, failedShards); reroute(allocation); - String failedShardsAsString = firstListElementsToCommaDelimitedString(failedShards, s -> s.getRoutingEntry().shardId().toString()); - return buildResultAndLogHealthChange(clusterState, allocation, "shards failed [" + failedShardsAsString + "] ..."); + String failedShardsAsString + = firstListElementsToCommaDelimitedString(failedShards, s -> s.getRoutingEntry().shardId().toString(), logger.isDebugEnabled()); + return buildResultAndLogHealthChange(clusterState, allocation, "shards failed [" + failedShardsAsString + "]"); } /** @@ -317,13 +319,14 @@ private void resetFailedAllocationCounter(RoutingAllocation allocation) { * @param The list element type. * @return A comma-separated string of the first few elements. */ - private String firstListElementsToCommaDelimitedString(List elements, Function formatter) { + static String firstListElementsToCommaDelimitedString(List elements, Function formatter, boolean isDebugEnabled) { final int maxNumberOfElements = 10; - return elements - .stream() - .limit(maxNumberOfElements) - .map(formatter) - .collect(Collectors.joining(", ")); + if (isDebugEnabled || elements.size() <= maxNumberOfElements) { + return elements.stream().map(formatter).collect(Collectors.joining(", ")); + } else { + return elements.stream().limit(maxNumberOfElements).map(formatter).collect(Collectors.joining(", ")) + + ", ... [" + elements.size() + " items in total]"; + } } public CommandsResult reroute(final ClusterState clusterState, AllocationCommands commands, boolean explain, boolean retryFailed) { @@ -351,22 +354,12 @@ public CommandsResult reroute(final ClusterState clusterState, AllocationCommand return new CommandsResult(explanations, buildResultAndLogHealthChange(clusterState, allocation, "reroute commands")); } - /** * Reroutes the routing table based on the live nodes. *
<p>
* If the same instance of ClusterState is returned, then no change has been made. */ public ClusterState reroute(ClusterState clusterState, String reason) { - return reroute(clusterState, reason, false); - } - - /** - * Reroutes the routing table based on the live nodes. - *
<p>
- * If the same instance of ClusterState is returned, then no change has been made. - */ - protected ClusterState reroute(ClusterState clusterState, String reason, boolean debug) { ClusterState fixedClusterState = adaptAutoExpandReplicas(clusterState); RoutingNodes routingNodes = getMutableRoutingNodes(fixedClusterState); @@ -374,7 +367,6 @@ protected ClusterState reroute(ClusterState clusterState, String reason, boolean routingNodes.unassigned().shuffle(); RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, fixedClusterState, clusterInfoService.getClusterInfo(), currentNanoTime()); - allocation.debugDecision(debug); reroute(allocation); if (fixedClusterState == clusterState && allocation.routingNodesChanged() == false) { return clusterState; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java index 7177cf8bef4be..0e18ca3c0ea35 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java @@ -34,6 +34,7 @@ import org.elasticsearch.cluster.routing.RerouteService; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.ClusterSettings; @@ -185,7 +186,7 @@ public void onNewInfo(ClusterInfo info) { if (reroute) { logger.info("rerouting shards: [{}]", explanation); - rerouteService.reroute("disk threshold monitor", ActionListener.wrap(r -> { + rerouteService.reroute("disk threshold monitor", Priority.HIGH, ActionListener.wrap(r -> { setLastRunTimeMillis(); listener.onResponse(r); }, e -> { diff --git a/server/src/main/java/org/elasticsearch/cluster/service/PendingClusterTask.java b/server/src/main/java/org/elasticsearch/cluster/service/PendingClusterTask.java index 93da0b99e9a25..8c486d5b583ac 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/PendingClusterTask.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/PendingClusterTask.java @@ -22,13 +22,13 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.unit.TimeValue; import java.io.IOException; -public class PendingClusterTask implements Streamable { +public class PendingClusterTask implements Writeable { private long insertOrder; private Priority priority; @@ -36,7 +36,12 @@ public class PendingClusterTask implements Streamable { private long timeInQueue; private boolean executing; - public PendingClusterTask() { + public PendingClusterTask(StreamInput in) throws IOException { + insertOrder = in.readVLong(); + priority = Priority.readFrom(in); + source = in.readText(); + timeInQueue = in.readLong(); + executing = in.readBoolean(); } public PendingClusterTask(long insertOrder, Priority priority, Text source, long timeInQueue, boolean executing) { @@ -73,15 +78,6 @@ public boolean isExecuting() { return executing; } - @Override - public void readFrom(StreamInput in) throws 
IOException { - insertOrder = in.readVLong(); - priority = Priority.readFrom(in); - source = in.readText(); - timeInQueue = in.readLong(); - executing = in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVLong(insertOrder); diff --git a/server/src/main/java/org/elasticsearch/common/Priority.java b/server/src/main/java/org/elasticsearch/common/Priority.java index bf7ded585d0a5..545c353e7ecf2 100644 --- a/server/src/main/java/org/elasticsearch/common/Priority.java +++ b/server/src/main/java/org/elasticsearch/common/Priority.java @@ -60,10 +60,18 @@ public static Priority fromByte(byte b) { this.value = value; } + /** + * @return whether tasks of {@code this} priority will run after those of priority {@code p}. + * For instance, {@code Priority.URGENT.after(Priority.IMMEDIATE)} returns {@code true}. + */ public boolean after(Priority p) { return this.compareTo(p) > 0; } + /** + * @return whether tasks of {@code this} priority will run no earlier than those of priority {@code p}. + * For instance, {@code Priority.URGENT.sameOrAfter(Priority.IMMEDIATE)} returns {@code true}. + */ public boolean sameOrAfter(Priority p) { return this.compareTo(p) >= 0; } diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java b/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java index a44d1fb05308a..94c6ea43d384c 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java @@ -38,15 +38,6 @@ public interface BlobContainer { */ BlobPath path(); - /** - * Tests whether a blob with the given blob name exists in the container. - * - * @param blobName - * The name of the blob whose existence is to be determined. - * @return {@code true} if a blob exists in the {@link BlobContainer} with the given name, and {@code false} otherwise. - */ - boolean blobExists(String blobName); - /** * Creates a new {@link InputStream} for the given blob name. 
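Note: `blobExists` is removed from the `BlobContainer` interface above (its `FsBlobContainer` implementation is removed just below), which steers callers toward attempting the read and handling absence, rather than a separate existence probe that is inherently racy and, on remote repositories, an extra round-trip. A sketch of the read-instead-of-probe pattern against the filesystem, roughly what `FsBlobContainer.readBlob` already provides by surfacing `NoSuchFileException`:

```java
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;

class ReadInsteadOfProbeSketch {
    // Attempt the read directly; absence is one failure mode of the read
    // itself, not a separate (and racy) pre-check.
    static InputStream readBlobOrNull(Path container, String blobName) throws IOException {
        try {
            return Files.newInputStream(container.resolve(blobName));
        } catch (NoSuchFileException e) {
            return null; // caller decides what a missing blob means
        }
    }
}
```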
* diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java index b51115b246673..6723a70a9abb3 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java @@ -127,11 +127,6 @@ public void delete() throws IOException { IOUtils.rm(path); } - @Override - public boolean blobExists(String blobName) { - return Files.exists(path.resolve(blobName)); - } - @Override public InputStream readBlob(String name) throws IOException { final Path resolvedPath = path.resolve(name); diff --git a/server/src/main/java/org/elasticsearch/common/document/DocumentField.java b/server/src/main/java/org/elasticsearch/common/document/DocumentField.java index 969c3347d67ae..61a5b829c5e48 100644 --- a/server/src/main/java/org/elasticsearch/common/document/DocumentField.java +++ b/server/src/main/java/org/elasticsearch/common/document/DocumentField.java @@ -21,7 +21,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -44,12 +44,18 @@ * @see SearchHit * @see GetResult */ -public class DocumentField implements Streamable, ToXContentFragment, Iterable { +public class DocumentField implements Writeable, ToXContentFragment, Iterable { private String name; private List values; - private DocumentField() { + public DocumentField(StreamInput in) throws IOException { + name = in.readString(); + int size = in.readVInt(); + values = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + values.add(in.readGenericValue()); + } } public DocumentField(String name, List values) { @@ -93,22 +99,6 @@ public Iterator iterator() { return values.iterator(); } - public static DocumentField readDocumentField(StreamInput in) throws IOException { - DocumentField result = new DocumentField(); - result.readFrom(in); - return result; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - name = in.readString(); - int size = in.readVInt(); - values = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - values.add(in.readGenericValue()); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoJson.java b/server/src/main/java/org/elasticsearch/common/geo/GeoJson.java index 45db221a65dbf..ec5600836f1e4 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeoJson.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeoJson.java @@ -228,8 +228,12 @@ private XContentBuilder coordinatesToXContent(Polygon polygon) throws IOExceptio private static Geometry createGeometry(String type, List geometries, CoordinateNode coordinates, Boolean orientation, boolean defaultOrientation, boolean coerce, DistanceUnit.Distance radius) { - - ShapeType shapeType = ShapeType.forName(type); + ShapeType shapeType; + if ("bbox".equalsIgnoreCase(type)) { + shapeType = ShapeType.ENVELOPE; + } else { + shapeType = ShapeType.forName(type); + } if (shapeType == ShapeType.GEOMETRYCOLLECTION) { if (geometries == null) { throw new 
ElasticsearchParseException("geometries not included"); @@ -484,7 +488,7 @@ public MultiPoint asMultiPoint() { return new MultiPoint(points); } - private double[][] asLineComponents(boolean orientation, boolean coerce) { + private double[][] asLineComponents(boolean orientation, boolean coerce, boolean close) { if (coordinate != null) { throw new ElasticsearchException("expected a list of points but got a point"); } @@ -495,7 +499,7 @@ private double[][] asLineComponents(boolean orientation, boolean coerce) { boolean needsClosing; int resultSize; - if (coerce && children.get(0).asPoint().equals(children.get(children.size() - 1).asPoint()) == false) { + if (close && coerce && children.get(0).asPoint().equals(children.get(children.size() - 1).asPoint()) == false) { needsClosing = true; resultSize = children.size() + 1; } else { @@ -531,12 +535,12 @@ private double[][] asLineComponents(boolean orientation, boolean coerce) { } public Line asLineString(boolean coerce) { - double[][] components = asLineComponents(true, coerce); + double[][] components = asLineComponents(true, coerce, false); return new Line(components[0], components[1], components[2]); } public LinearRing asLinearRing(boolean orientation, boolean coerce) { - double[][] components = asLineComponents(orientation, coerce); + double[][] components = asLineComponents(orientation, coerce, true); return new LinearRing(components[0], components[1], components[2]); } diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoPoint.java b/server/src/main/java/org/elasticsearch/common/geo/GeoPoint.java index b4039fdbd2825..1043c9115e8ef 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeoPoint.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeoPoint.java @@ -25,13 +25,21 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.util.BitUtil; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.geo.GeoUtils.EffectivePoint; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.geo.geometry.Geometry; +import org.elasticsearch.geo.geometry.Point; +import org.elasticsearch.geo.geometry.Rectangle; +import org.elasticsearch.geo.geometry.ShapeType; +import org.elasticsearch.geo.utils.GeographyValidator; import org.elasticsearch.geo.utils.Geohash; +import org.elasticsearch.geo.utils.WellKnownText; import java.io.IOException; import java.util.Arrays; +import java.util.Locale; import static org.elasticsearch.index.mapper.GeoPointFieldMapper.Names.IGNORE_Z_VALUE; @@ -79,14 +87,16 @@ public GeoPoint resetLon(double lon) { } public GeoPoint resetFromString(String value) { - return resetFromString(value, false); + return resetFromString(value, false, EffectivePoint.BOTTOM_LEFT); } - public GeoPoint resetFromString(String value, final boolean ignoreZValue) { - if (value.contains(",")) { + public GeoPoint resetFromString(String value, final boolean ignoreZValue, EffectivePoint effectivePoint) { + if (value.toLowerCase(Locale.ROOT).contains("point")) { + return resetFromWKT(value, ignoreZValue); + } else if (value.contains(",")) { return resetFromCoordinates(value, ignoreZValue); } - return resetFromGeoHash(value); + return parseGeoHash(value, effectivePoint); } @@ -114,6 +124,39 @@ public GeoPoint resetFromCoordinates(String value, final boolean ignoreZValue) { return reset(lat, lon); } + private GeoPoint resetFromWKT(String value, boolean 
ignoreZValue) { + Geometry geometry; + try { + geometry = new WellKnownText(false, new GeographyValidator(ignoreZValue)) + .fromWKT(value); + } catch (Exception e) { + throw new ElasticsearchParseException("Invalid WKT format", e); + } + if (geometry.type() != ShapeType.POINT) { + throw new ElasticsearchParseException("[geo_point] supports only POINT among WKT primitives, " + + "but found " + geometry.type()); + } + Point point = (Point) geometry; + return reset(point.getLat(), point.getLon()); + } + + GeoPoint parseGeoHash(String geohash, EffectivePoint effectivePoint) { + if (effectivePoint == EffectivePoint.BOTTOM_LEFT) { + return resetFromGeoHash(geohash); + } else { + Rectangle rectangle = Geohash.toBoundingBox(geohash); + switch (effectivePoint) { + case TOP_LEFT: + return reset(rectangle.getMaxLat(), rectangle.getMinLon()); + case TOP_RIGHT: + return reset(rectangle.getMaxLat(), rectangle.getMaxLon()); + case BOTTOM_RIGHT: + return reset(rectangle.getMinLat(), rectangle.getMaxLon()); + default: + throw new IllegalArgumentException("Unsupported effective point " + effectivePoint); + } + } + } public GeoPoint resetFromIndexHash(long hash) { lon = Geohash.decodeLongitude(hash); diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java b/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java index f990a9750e0e1..7e7cbac051f60 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java @@ -23,18 +23,14 @@ import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree; import org.apache.lucene.util.SloppyMath; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.common.xcontent.XContentSubParser; -import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.common.xcontent.support.MapXContentParser; import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.geo.geometry.Rectangle; -import org.elasticsearch.geo.utils.Geohash; import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.GeoPointValues; import org.elasticsearch.index.fielddata.MultiGeoPointValues; @@ -43,7 +39,7 @@ import org.elasticsearch.index.fielddata.SortingNumericDoubleValues; import java.io.IOException; -import java.io.InputStream; +import java.util.Collections; public class GeoUtils { @@ -376,21 +372,12 @@ public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point) thro * Array: two or more elements, the first element is longitude, the second is latitude, the rest is ignored if ignoreZValue is true */ public static GeoPoint parseGeoPoint(Object value, final boolean ignoreZValue) throws ElasticsearchParseException { - try { - XContentBuilder content = JsonXContent.contentBuilder(); - content.startObject(); - content.field("null_value", value); - content.endObject(); - - try (InputStream stream = BytesReference.bytes(content).streamInput(); - XContentParser parser = JsonXContent.jsonXContent.createParser( - NamedXContentRegistry.EMPTY, 
LoggingDeprecationHandler.INSTANCE, stream)) { - parser.nextToken(); // start object - parser.nextToken(); // field name - parser.nextToken(); // field value - return parseGeoPoint(parser, new GeoPoint(), ignoreZValue); - } - + try (XContentParser parser = new MapXContentParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, + Collections.singletonMap("null_value", value), null)) { + parser.nextToken(); // start object + parser.nextToken(); // field name + parser.nextToken(); // field value + return parseGeoPoint(parser, new GeoPoint(), ignoreZValue); } catch (IOException ex) { throw new ElasticsearchParseException("error parsing geopoint", ex); } @@ -487,7 +474,7 @@ public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point, fina if(!Double.isNaN(lat) || !Double.isNaN(lon)) { throw new ElasticsearchParseException("field must be either lat/lon or geohash"); } else { - return parseGeoHash(point, geohash, effectivePoint); + return point.parseGeoHash(geohash, effectivePoint); } } else if (numberFormatException != null) { throw new ElasticsearchParseException("[{}] and [{}] must be valid double values", numberFormatException, LATITUDE, @@ -510,8 +497,10 @@ public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point, fina lon = subParser.doubleValue(); } else if (element == 2) { lat = subParser.doubleValue(); - } else { + } else if (element == 3) { GeoPoint.assertZValue(ignoreZValue, subParser.doubleValue()); + } else { + throw new ElasticsearchParseException("[geo_point] field type does not accept > 3 dimensions"); } } else { throw new ElasticsearchParseException("numeric value expected"); @@ -521,35 +510,12 @@ public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point, fina return point.reset(lat, lon); } else if(parser.currentToken() == Token.VALUE_STRING) { String val = parser.text(); - if (val.contains(",")) { - return point.resetFromString(val, ignoreZValue); - } else { - return parseGeoHash(point, val, effectivePoint); - } - + return point.resetFromString(val, ignoreZValue, effectivePoint); } else { throw new ElasticsearchParseException("geo_point expected"); } } - private static GeoPoint parseGeoHash(GeoPoint point, String geohash, EffectivePoint effectivePoint) { - if (effectivePoint == EffectivePoint.BOTTOM_LEFT) { - return point.resetFromGeoHash(geohash); - } else { - Rectangle rectangle = Geohash.toBoundingBox(geohash); - switch (effectivePoint) { - case TOP_LEFT: - return point.reset(rectangle.getMaxLat(), rectangle.getMinLon()); - case TOP_RIGHT: - return point.reset(rectangle.getMaxLat(), rectangle.getMaxLon()); - case BOTTOM_RIGHT: - return point.reset(rectangle.getMinLat(), rectangle.getMaxLon()); - default: - throw new IllegalArgumentException("Unsupported effective point " + effectivePoint); - } - } - } - /** * Parse a {@link GeoPoint} from a string. 
The string must have one of the following forms: * @@ -563,12 +529,7 @@ private static GeoPoint parseGeoHash(GeoPoint point, String geohash, EffectivePo */ public static GeoPoint parseFromString(String val) { GeoPoint point = new GeoPoint(); - boolean ignoreZValue = false; - if (val.contains(",")) { - return point.resetFromString(val, ignoreZValue); - } else { - return parseGeoHash(point, val, EffectivePoint.BOTTOM_LEFT); - } + return point.resetFromString(val, false, EffectivePoint.BOTTOM_LEFT); } /** diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeometryIndexer.java b/server/src/main/java/org/elasticsearch/common/geo/GeometryIndexer.java new file mode 100644 index 0000000000000..48f84b1211b92 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/geo/GeometryIndexer.java @@ -0,0 +1,933 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + +package org.elasticsearch.common.geo; + +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.geo.geometry.Circle; +import org.elasticsearch.geo.geometry.Geometry; +import org.elasticsearch.geo.geometry.GeometryCollection; +import org.elasticsearch.geo.geometry.GeometryVisitor; +import org.elasticsearch.geo.geometry.Line; +import org.elasticsearch.geo.geometry.LinearRing; +import org.elasticsearch.geo.geometry.MultiLine; +import org.elasticsearch.geo.geometry.MultiPoint; +import org.elasticsearch.geo.geometry.MultiPolygon; +import org.elasticsearch.geo.geometry.Point; +import org.elasticsearch.geo.geometry.Polygon; +import org.elasticsearch.geo.geometry.Rectangle; +import org.locationtech.spatial4j.exception.InvalidShapeException; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.apache.lucene.geo.GeoUtils.orient; +import static org.elasticsearch.common.geo.GeoUtils.normalizeLat; +import static org.elasticsearch.common.geo.GeoUtils.normalizeLon; + +/** + * Utility class that converts geometries into Lucene-compatible form + */ +public final class GeometryIndexer { + + private static final double DATELINE = 180; + + protected static final Comparator INTERSECTION_ORDER = Comparator.comparingDouble(o -> o.intersect.getLat()); + + private final boolean orientation; + + public GeometryIndexer(boolean orientation) { + this.orientation = orientation; + } + + public Geometry prepareForIndexing(Geometry geometry) { + if (geometry == null) { + return null; + } + + return geometry.visit(new GeometryVisitor<>() { + @Override + public Geometry visit(Circle circle) { + throw new UnsupportedOperationException("CIRCLE geometry is not 
supported"); + } + + @Override + public Geometry visit(GeometryCollection collection) { + if (collection.isEmpty()) { + return GeometryCollection.EMPTY; + } + List shapes = new ArrayList<>(collection.size()); + + // Flatten collection and convert each geometry to Lucene-friendly format + for (Geometry shape : collection) { + shapes.add(shape.visit(this)); + } + + if (shapes.size() == 1) { + return shapes.get(0); + } else { + return new GeometryCollection<>(shapes); + } + } + + @Override + public Geometry visit(Line line) { + // decompose linestrings crossing dateline into array of Lines + List lines = decomposeGeometry(line, new ArrayList<>()); + if (lines.size() == 1) { + return lines.get(0); + } else { + return new MultiLine(lines); + } + } + + @Override + public Geometry visit(LinearRing ring) { + throw new UnsupportedOperationException("cannot index linear ring [" + ring + "] directly"); + } + + @Override + public Geometry visit(MultiLine multiLine) { + List lines = new ArrayList<>(); + for (Line line : multiLine) { + decomposeGeometry(line, lines); + } + if (lines.isEmpty()) { + return GeometryCollection.EMPTY; + } else if (lines.size() == 1) { + return lines.get(0); + } else { + return new MultiLine(lines); + } + } + + @Override + public Geometry visit(MultiPoint multiPoint) { + if (multiPoint.isEmpty()) { + return MultiPoint.EMPTY; + } else if (multiPoint.size() == 1) { + return multiPoint.get(0).visit(this); + } else { + List points = new ArrayList<>(); + for (Point point : multiPoint) { + points.add((Point) point.visit(this)); + } + return new MultiPoint(points); + } + } + + @Override + public Geometry visit(MultiPolygon multiPolygon) { + List polygons = new ArrayList<>(); + for (Polygon polygon : multiPolygon) { + polygons.addAll(decompose(polygon, orientation)); + } + if (polygons.size() == 1) { + return polygons.get(0); + } else { + return new MultiPolygon(polygons); + } + } + + @Override + public Geometry visit(Point point) { + //TODO: Just remove altitude for now. We need to add normalization later + return new Point(point.getLat(), point.getLon()); + } + + @Override + public Geometry visit(Polygon polygon) { + List polygons = decompose(polygon, orientation); + if (polygons.size() == 1) { + return polygons.get(0); + } else { + return new MultiPolygon(polygons); + } + } + + @Override + public Geometry visit(Rectangle rectangle) { + return rectangle; + } + }); + } + + /** + * Calculate the intersection of a line segment and a vertical dateline. + * + * @param p1x longitude of the start-point of the line segment + * @param p2x longitude of the end-point of the line segment + * @param dateline x-coordinate of the vertical dateline + * @return position of the intersection in the open range (0..1] if the line + * segment intersects with the line segment. 
Otherwise this method + * returns {@link Double#NaN} + */ + protected static double intersection(double p1x, double p2x, double dateline) { + if (p1x == p2x && p1x != dateline) { + return Double.NaN; + } else if (p1x == p2x && p1x == dateline) { + return 1.0; + } else { + final double t = (dateline - p1x) / (p2x - p1x); + if (t > 1 || t <= 0) { + return Double.NaN; + } else { + return t; + } + } + } + + /** + * Splits the specified line at the datelines and adds the resulting parts to the supplied lines array + */ + private List decomposeGeometry(Line line, List lines) { + + for (Line partPlus : decompose(+DATELINE, line)) { + for (Line partMinus : decompose(-DATELINE, partPlus)) { + double[] lats = new double[partMinus.length()]; + double[] lons = new double[partMinus.length()]; + for (int i = 0; i < partMinus.length(); i++) { + lats[i] = normalizeLat(partMinus.getLat(i)); + lons[i] = normalizeLon(partMinus.getLon(i)); + } + lines.add(new Line(lats, lons)); + } + } + return lines; + } + + /** + * Decompose a linestring given as an array of coordinates at a vertical line. + * + * @param dateline x-axis intercept of the vertical line + * @param line linestring that should be decomposed + * @return array of linestrings given as coordinate arrays + */ + private List decompose(double dateline, Line line) { + double[] lons = line.getLons(); + double[] lats = line.getLats(); + return decompose(dateline, lons, lats); + } + + /** + * Decompose a linestring given as two arrays of coordinates at a vertical line. + */ + private List decompose(double dateline, double[] lons, double[] lats) { + int offset = 0; + ArrayList parts = new ArrayList<>(); + + double shift = lons[0] > DATELINE ? DATELINE : (lons[0] < -DATELINE ? -DATELINE : 0); + + for (int i = 1; i < lons.length; i++) { + double t = intersection(lons[i - 1], lons[i], dateline); + if (Double.isNaN(t) == false) { + double[] partLons = Arrays.copyOfRange(lons, offset, i + 1); + double[] partLats = Arrays.copyOfRange(lats, offset, i + 1); + if (t < 1) { + Point intersection = position(new Point(lats[i - 1], lons[i - 1]), new Point(lats[i], lons[i]), t); + partLons[partLons.length - 1] = intersection.getLon(); + partLats[partLats.length - 1] = intersection.getLat(); + + lons[i - 1] = intersection.getLon(); + lats[i - 1] = intersection.getLat(); + + shift(shift, partLons); + offset = i - 1; + shift = lons[i] > DATELINE ? DATELINE : (lons[i] < -DATELINE ? 
-DATELINE : 0); + } else { + shift(shift, partLons); + offset = i; + } + parts.add(new Line(partLats, partLons)); + } + } + + if (offset == 0) { + shift(shift, lons); + parts.add(new Line(lats, lons)); + } else if (offset < lons.length - 1) { + double[] partLons = Arrays.copyOfRange(lons, offset, lons.length); + double[] partLats = Arrays.copyOfRange(lats, offset, lats.length); + shift(shift, partLons); + parts.add(new Line(partLats, partLons)); + } + return parts; + } + + /** + * shifts all coordinates by (-2 * shift) + */ + private static void shift(double shift, double[] lons) { + if (shift != 0) { + for (int j = 0; j < lons.length; j++) { + lons[j] = lons[j] - 2 * shift; + } + } + } + + protected static Point shift(Point coordinate, double dateline) { + if (dateline == 0) { + return coordinate; + } else { + return new Point(coordinate.getLat(), -2 * dateline + coordinate.getLon()); + } + } + + private List decompose(Polygon polygon, boolean orientation) { + int numEdges = polygon.getPolygon().length() - 1; // Last point is repeated + for (int i = 0; i < polygon.getNumberOfHoles(); i++) { + numEdges += polygon.getHole(i).length() - 1; + validateHole(polygon.getPolygon(), polygon.getHole(i)); + } + + Edge[] edges = new Edge[numEdges]; + Edge[] holeComponents = new Edge[polygon.getNumberOfHoles()]; + final AtomicBoolean translated = new AtomicBoolean(false); + int offset = createEdges(0, orientation, polygon.getPolygon(), null, edges, 0, translated); + for (int i = 0; i < polygon.getNumberOfHoles(); i++) { + int length = createEdges(i + 1, orientation, polygon.getPolygon(), polygon.getHole(i), edges, offset, translated); + holeComponents[i] = edges[offset]; + offset += length; + } + + int numHoles = holeComponents.length; + + numHoles = merge(edges, 0, intersections(+DATELINE, edges), holeComponents, numHoles); + numHoles = merge(edges, 0, intersections(-DATELINE, edges), holeComponents, numHoles); + + return compose(edges, holeComponents, numHoles); + } + + private void validateHole(LinearRing shell, LinearRing hole) { + Set exterior = new HashSet<>(); + Set interior = new HashSet<>(); + for (int i = 0; i < shell.length(); i++) { + exterior.add(new Point(shell.getLat(i), shell.getLon(i))); + } + for (int i = 0; i < hole.length(); i++) { + interior.add(new Point(hole.getLat(i), hole.getLon(i))); + } + exterior.retainAll(interior); + if (exterior.size() >= 2) { + throw new IllegalArgumentException("Invalid polygon, interior cannot share more than one point with the exterior"); + } + }
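To see what the line decomposition above produces: a line whose longitudes run past +180 is cut at the dateline and the overflowing part is shifted back into range. A hypothetical usage sketch (invented input values; assumes the GeometryIndexer from this diff and the org.elasticsearch.geo.geometry classes, whose constructors take latitudes first):

    // A line from (lat 0, lon 170) to (lat 5, lon 190) crosses the dateline at lat 2.5.
    Line crossing = new Line(new double[]{0, 5}, new double[]{170, 190});
    Geometry indexed = new GeometryIndexer(true).prepareForIndexing(crossing);
    // Expected: a MultiLine with two parts, approximately
    //   lats {0, 2.5}, lons {170, 180}
    //   lats {2.5, 5}, lons {-180, -170}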
It contains + * fields for a dateline intersection and component id + */ + private static final class Edge { + Point coordinate; // coordinate of the start point + Edge next; // next segment + Point intersect; // potential intersection with dateline + int component = -1; // id of the component this edge belongs to + public static final Point MAX_COORDINATE = new Point(Double.POSITIVE_INFINITY, Double.POSITIVE_INFINITY); + + protected Edge(Point coordinate, Edge next, Point intersection) { + this.coordinate = coordinate; + // use setter to catch duplicate point cases + this.setNext(next); + this.intersect = intersection; + if (next != null) { + this.component = next.component; + } + } + + protected Edge(Point coordinate, Edge next) { + this(coordinate, next, Edge.MAX_COORDINATE); + } + + protected void setNext(Edge next) { + // don't bother setting next if its null + if (next != null) { + // self-loop throws an invalid shape + if (this.coordinate.equals(next.coordinate)) { + throw new InvalidShapeException("Provided shape has duplicate consecutive coordinates at: " + this.coordinate); + } + this.next = next; + } + } + + /** + * Set the intersection of this line segment to the given position + * + * @param position position of the intersection [0..1] + * @return the {@link Point} of the intersection + */ + protected Point intersection(double position) { + return intersect = position(coordinate, next.coordinate, position); + } + + @Override + public String toString() { + return "Edge[Component=" + component + "; start=" + coordinate + " " + "; intersection=" + intersect + "]"; + } + } + + protected static Point position(Point p1, Point p2, double position) { + if (position == 0) { + return p1; + } else if (position == 1) { + return p2; + } else { + final double x = p1.getLon() + position * (p2.getLon() - p1.getLon()); + final double y = p1.getLat() + position * (p2.getLat() - p1.getLat()); + return new Point(y, x); + } + } + + private int createEdges(int component, boolean orientation, LinearRing shell, + LinearRing hole, Edge[] edges, int offset, final AtomicBoolean translated) { + // inner rings (holes) have an opposite direction than the outer rings + // XOR will invert the orientation for outer ring cases (Truth Table:, T/T = F, T/F = T, F/T = T, F/F = F) + boolean direction = (component == 0 ^ orientation); + // set the points array accordingly (shell or hole) + Point[] points = (hole != null) ? 
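The position() helper above is plain linear interpolation between two points; the parameter comes from intersection(...). A small sketch with invented values:

    // t = 0.5 yields the midpoint of the segment, here the dateline crossing.
    Point mid = position(new Point(0, 170), new Point(5, 190), 0.5);
    // mid.getLat() == 2.5, mid.getLon() == 180.0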
+ private int createEdges(int component, boolean orientation, LinearRing shell, + LinearRing hole, Edge[] edges, int offset, final AtomicBoolean translated) { + // inner rings (holes) have an opposite direction from the outer rings + // XOR will invert the orientation for outer ring cases (Truth Table: T/T = F, T/F = T, F/T = T, F/F = F) + boolean direction = (component == 0 ^ orientation); + // set the points array accordingly (shell or hole) + Point[] points = (hole != null) ? points(hole) : points(shell); + ring(component, direction, orientation == false, points, 0, edges, offset, points.length - 1, translated); + return points.length - 1; + } + + private Point[] points(LinearRing linearRing) { + Point[] points = new Point[linearRing.length()]; + for (int i = 0; i < linearRing.length(); i++) { + points[i] = new Point(linearRing.getLat(i), linearRing.getLon(i)); + } + return points; + } + + /** + * Create a connected list of edges from an array of coordinates + * + * @param points array of points + * @param offset index of the first point + * @param length number of points + * @return Array of edges + */ + private Edge[] ring(int component, boolean direction, boolean handedness, + Point[] points, int offset, Edge[] edges, int toffset, int length, final AtomicBoolean translated) { + + boolean orientation = getOrientation(points, offset, length); + + // OGC requires the shell to be ccw (Right-Handedness) and holes to be cw (Left-Handedness) + // since GeoJSON doesn't specify (and doesn't need to) GEO core will assume OGC standards + // thus if the orientation is computed as cw, the logic will translate points across the dateline + // and convert to a right handed system + + // compute the bounding box and calculate the range + double[] range = range(points, offset, length); + final double rng = range[1] - range[0]; + // translate the points if the following is true + // 1. the shell orientation is cw and the range is greater than a hemisphere (180 degrees) but not spanning 2 hemispheres + // (translation would result in a collapsed poly) + // 2. the shell of the candidate hole has been translated (to preserve the coordinate system) + boolean incorrectOrientation = component == 0 && handedness != orientation; + if ((incorrectOrientation && (rng > DATELINE && rng != 2 * DATELINE)) || (translated.get() && component != 0)) { + translate(points); + // flip the translation bit if the shell is being translated + if (component == 0) { + translated.set(true); + } + // correct the orientation post translation (ccw for shell, cw for holes) + if (component == 0 || (component != 0 && handedness == orientation)) { + orientation = !orientation; + } + } + return concat(component, direction ^ orientation, points, offset, edges, toffset, length); + } + + /** + * Transforms coordinates in the western hemisphere (-180:0) to a (180:360) range + */ + private static void translate(Point[] points) { + for (int i = 0; i < points.length; i++) { + if (points[i].getLon() < 0) { + points[i] = new Point(points[i].getLat(), points[i].getLon() + 2 * DATELINE); + } + } + } + + /** + * @return whether the points are clockwise (true) or anticlockwise (false) + */ + private static boolean getOrientation(Point[] points, int offset, int length) { + // calculate the direction of the points: find the southernmost point + // and check its neighbors' orientation. + + final int top = top(points, offset, length); + final int prev = (top + length - 1) % length; + final int next = (top + 1) % length; + + final int determinantSign = orient( + points[offset + prev].getLon(), points[offset + prev].getLat(), + points[offset + top].getLon(), points[offset + top].getLat(), + points[offset + next].getLon(), points[offset + next].getLat()); + + if (determinantSign == 0) { + // Points are collinear; since `top` is an extreme point it cannot lie between its neighbours, + // so the edges on either side of `top` must coincide. + throw new InvalidShapeException("Cannot determine orientation: edges adjacent to (" + + points[offset + top].getLon() + "," + points[offset + top].getLat() + ") coincide"); + } + + return determinantSign < 0; + }
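getOrientation(...) decides the winding of the whole ring with a single orient(...) call at an extreme vertex: the southernmost (and, on ties, westernmost) point is necessarily convex, so the sign of the determinant there settles it. A sketch with an invented closed ring (the duplicate closing point is excluded from length):

    Point[] ring = {new Point(0, 0), new Point(0, 10), new Point(10, 10), new Point(0, 0)};
    boolean clockwise = getOrientation(ring, 0, 3); // false: this ring runs anticlockwise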
+ /** + * @return the (offset) index of the point that is furthest west amongst + * those points that are the furthest south in the set. + */ + private static int top(Point[] points, int offset, int length) { + int top = 0; // the loop starts at 1 because top already points to index 0 + for (int i = 1; i < length; i++) { + if (points[offset + i].getLat() < points[offset + top].getLat()) { + top = i; + } else if (points[offset + i].getLat() == points[offset + top].getLat()) { + if (points[offset + i].getLon() < points[offset + top].getLon()) { + top = i; + } + } + } + return top; + } + + + private static double[] range(Point[] points, int offset, int length) { + double minX = points[0].getLon(); + double maxX = minX; + double minY = points[0].getLat(); + double maxY = minY; + // compute the bounding coordinates (@todo: cleanup brute force) + for (int i = 1; i < length; ++i) { + Point point = points[offset + i]; + if (point.getLon() < minX) { + minX = point.getLon(); + } + if (point.getLon() > maxX) { + maxX = point.getLon(); + } + if (point.getLat() < minY) { + minY = point.getLat(); + } + if (point.getLat() > maxY) { + maxY = point.getLat(); + } + } + return new double[]{minX, maxX, minY, maxY}; + }
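range(...) is a brute-force bounding box returned as {minLon, maxLon, minLat, maxLat}; ring(...) above only consumes the longitudinal extent (range[1] - range[0]) to detect rings wider than a hemisphere. Continuing the invented ring from the previous sketch:

    double[] bbox = range(ring, 0, 3); // {0.0, 10.0, 0.0, 10.0}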
+ private int merge(Edge[] intersections, int offset, int length, Edge[] holes, int numHoles) { + // Intersections appear pairwise. On the first edge the inner face + // of the polygon is entered. On the second edge the outer face + // is entered. Other kinds of intersections are discarded by the + // intersection function + + for (int i = 0; i < length; i += 2) { + Edge e1 = intersections[offset + i + 0]; + Edge e2 = intersections[offset + i + 1]; + + // If two segments are connected maybe a hole must be deleted + // Since Edges of components appear pairwise we need to check + // the second edge only (the first edge is either polygon or + // already handled) + if (e2.component > 0) { + //TODO: Check if we could save the set null step + numHoles--; + holes[e2.component - 1] = holes[numHoles]; + holes[numHoles] = null; + } + // only connect edges if intersections are pairwise + // 1. per the comment above, the edge array is sorted by the y-value of the intersection + // with the dateline. Two edges have the same y intercept when they cross the + // dateline thus they appear sequentially (pairwise) in the edge array. Two edges + // do not have the same y intercept when we're forming a multi-poly from a poly + // that wraps the dateline (but there are 2 ordered intercepts). + // The connect method creates a new edge for these paired edges in the linked list. + // For boundary conditions (e.g., intersect but not crossing) there is no sibling edge + // to connect. Thus the first logic check enforces the pairwise rule + // 2. the second logic check ensures the two candidate edges aren't already connected by an + // existing edge along the dateline - this is necessary due to a logic change in + // ShapeBuilder.intersection that computes dateline edges as valid intersect points + // in support of OGC standards + if (e1.intersect != Edge.MAX_COORDINATE && e2.intersect != Edge.MAX_COORDINATE + && !(e1.next.next.coordinate.equals(e2.coordinate) && Math.abs(e1.next.coordinate.getLon()) == DATELINE + && Math.abs(e2.coordinate.getLon()) == DATELINE)) { + connect(e1, e2); + } + } + return numHoles; + } + + private void connect(Edge in, Edge out) { + assert in != null && out != null; + assert in != out; + // Connect two edges by inserting the point of the dateline intersection + // and linking them with two new edges between these points, one per direction + if (in.intersect != in.next.coordinate) { + // NOTE: the order of the object creation is crucial here! Don't change it! + // first edge has no point on dateline + Edge e1 = new Edge(in.intersect, in.next); + + if (out.intersect != out.next.coordinate) { + // second edge has no point on dateline + Edge e2 = new Edge(out.intersect, out.next); + in.next = new Edge(in.intersect, e2, in.intersect); + } else { + // second edge intersects with dateline + in.next = new Edge(in.intersect, out.next, in.intersect); + } + out.next = new Edge(out.intersect, e1, out.intersect); + } else if (in.next != out && in.coordinate != out.intersect) { + // first edge intersects with dateline + Edge e2 = new Edge(out.intersect, in.next, out.intersect); + + if (out.intersect != out.next.coordinate) { + // second edge has no point on dateline + Edge e1 = new Edge(out.intersect, out.next); + in.next = new Edge(in.intersect, e1, in.intersect); + + } else { + // second edge intersects with dateline + in.next = new Edge(in.intersect, out.next, in.intersect); + } + out.next = e2; + } + } + + /** + * Concatenate a set of points to a polygon + * + * @param component component id of the polygon + * @param direction direction of the ring + * @param points list of points to concatenate + * @param pointOffset index of the first point + * @param edges Array of edges to write the result to + * @param edgeOffset index of the first edge in the result + * @param length number of points to use + * @return the edges created + */ + private static Edge[] concat(int component, boolean direction, Point[] points, final int pointOffset, Edge[] edges, + final int edgeOffset, int length) { + assert edges.length >= length + edgeOffset; + assert points.length >= length + pointOffset; + edges[edgeOffset] = new Edge(new Point(points[pointOffset].getLat(), points[pointOffset].getLon()), null); + for (int i = 1; i < length; i++) { + Point nextPoint = new Point(points[pointOffset + i].getLat(), points[pointOffset + i].getLon()); + if (direction) { + edges[edgeOffset + i] = new Edge(nextPoint, edges[edgeOffset + i - 1]); + edges[edgeOffset + i].component = component; + } else if (!edges[edgeOffset + i - 1].coordinate.equals(nextPoint)) { + edges[edgeOffset + i - 1].next = edges[edgeOffset + i] = new Edge(nextPoint, null); + edges[edgeOffset + i - 1].component = component; + } else { + throw new InvalidShapeException("Provided shape has duplicate consecutive coordinates at: " + nextPoint); + } + } + + if (direction) { + edges[edgeOffset].setNext(edges[edgeOffset + length - 1]); + edges[edgeOffset].component = component; + } else { + edges[edgeOffset + length - 1].setNext(edges[edgeOffset]); + edges[edgeOffset + length - 
1].component = component; + } + + return edges; + } + + /** + * Calculate all intersections of line segments and a vertical line. The + * Array of edges will be ordered asc by the y-coordinate of the + * intersections of edges. + * + * @param dateline + * x-coordinate of the dateline + * @param edges + * set of edges that may intersect with the dateline + * @return number of intersecting edges + */ + protected static int intersections(double dateline, Edge[] edges) { + int numIntersections = 0; + assert !Double.isNaN(dateline); + for (int i = 0; i < edges.length; i++) { + Point p1 = edges[i].coordinate; + Point p2 = edges[i].next.coordinate; + assert !Double.isNaN(p2.getLon()) && !Double.isNaN(p1.getLon()); + edges[i].intersect = Edge.MAX_COORDINATE; + + double position = intersection(p1.getLon(), p2.getLon(), dateline); + if (!Double.isNaN(position)) { + edges[i].intersection(position); + numIntersections++; + } + } + Arrays.sort(edges, INTERSECTION_ORDER); + return numIntersections; + } + + + private static Edge[] edges(Edge[] edges, int numHoles, List> components) { + ArrayList mainEdges = new ArrayList<>(edges.length); + + for (int i = 0; i < edges.length; i++) { + if (edges[i].component >= 0) { + double[] partitionPoint = new double[3]; + int length = component(edges[i], -(components.size()+numHoles+1), mainEdges, partitionPoint); + List component = new ArrayList<>(); + component.add(coordinates(edges[i], new Point[length+1], partitionPoint)); + components.add(component); + } + } + + return mainEdges.toArray(new Edge[mainEdges.size()]); + } + + private static List compose(Edge[] edges, Edge[] holes, int numHoles) { + final List> components = new ArrayList<>(); + assign(holes, holes(holes, numHoles), numHoles, edges(edges, numHoles, components), components); + return buildPoints(components); + } + + private static void assign(Edge[] holes, Point[][] points, int numHoles, Edge[] edges, List> components) { + // Assign Hole to related components + // To find the new component the hole belongs to all intersections of the + // polygon edges with a vertical line are calculated. This vertical line + // is an arbitrary point of the hole. The polygon edge next to this point + // is part of the polygon the hole belongs to. + for (int i = 0; i < numHoles; i++) { + // To do the assignment we assume (and later, elsewhere, check) that each hole is within + // a single component, and the components do not overlap. Based on this assumption, it's + // enough to find a component that contains some vertex of the hole, and + // holes[i].coordinate is such a vertex, so we use that one. + + // First, we sort all the edges according to their order of intersection with the line + // of longitude through holes[i].coordinate, in order from south to north. Edges that do + // not intersect this line are sorted to the end of the array and of no further interest + // here. + final Edge current = new Edge(holes[i].coordinate, holes[i].next); + current.intersect = current.coordinate; + final int intersections = intersections(current.coordinate.getLon(), edges); + + if (intersections == 0) { + // There were no edges that intersect the line of longitude through + // holes[i].coordinate, so there's no way this hole is within the polygon. + throw new InvalidShapeException("Invalid shape: Hole is not within polygon"); + } + + // Next we do a binary search to find the position of holes[i].coordinate in the array. 
+ // The binary search returns the index of an exact match, or (-insertionPoint - 1) if + // the vertex lies between the intersections of edges[insertionPoint] and + // edges[insertionPoint+1]. The latter case is vastly more common. + + final int pos; + boolean sharedVertex = false; + if (((pos = Arrays.binarySearch(edges, 0, intersections, current, INTERSECTION_ORDER)) >= 0) + && !(sharedVertex = (edges[pos].intersect.equals(current.coordinate)))) { + // The binary search returned an exact match, but we checked again using compareTo() + // and it didn't match after all. + + // TODO Can this actually happen? Needs a test to exercise it, or else needs to be removed. + throw new InvalidShapeException("Invalid shape: Hole is not within polygon"); + } + + final int index; + if (sharedVertex) { + // holes[i].coordinate lies exactly on an edge. + index = 0; // TODO Should this be pos instead of 0? This assigns exact matches to the southernmost component. + } else if (pos == -1) { + // holes[i].coordinate is strictly south of all intersections. Assign it to the + // southernmost component, and allow later validation to spot that it is not + // entirely within the chosen component. + index = 0; + } else { + // holes[i].coordinate is strictly north of at least one intersection. Assign it to + // the component immediately to its south. + index = -(pos + 2); + } + + final int component = -edges[index].component - numHoles - 1; + + components.get(component).add(points[i]); + } + } + + /** + * This method sets the component id of all edges in a ring to a given id and shifts the + * coordinates of this component according to the dateline + * + * @param edge An arbitrary edge of the component + * @param id id to apply to the component + * @param edges a list of edges to which all edges of the component will be added (could be null) + * @return number of edges that belong to this component + */ + private static int component(final Edge edge, final int id, final ArrayList edges, double[] partitionPoint) { + // find a coordinate that is not part of the dateline + Edge any = edge; + while(any.coordinate.getLon() == +DATELINE || any.coordinate.getLon() == -DATELINE) { + if((any = any.next) == edge) { + break; + } + } + + double shiftOffset = any.coordinate.getLon() > DATELINE ? DATELINE : (any.coordinate.getLon() < -DATELINE ? -DATELINE : 0); + + // run along the border of the component, collect the + // edges, shift them according to the dateline and + // update the component id + int length = 0, connectedComponents = 0; + // if there are two connected components, splitIndex keeps track of where to split the edge array + // start at 1 since the source coordinate is shared + int splitIndex = 1; + Edge current = edge; + Edge prev = edge; + // bookkeep the source and sink of each visited coordinate + HashMap> visitedEdge = new HashMap<>(); + do { + current.coordinate = shift(current.coordinate, shiftOffset); + current.component = id; + + if (edges != null) { + // found a closed loop - we have two connected components so we need to slice into two distinct components + if (visitedEdge.containsKey(current.coordinate)) { + partitionPoint[0] = current.coordinate.getLon(); + partitionPoint[1] = current.coordinate.getLat(); + if (connectedComponents > 0 && current.next != edge) { + throw new InvalidShapeException("Shape contains more than one shared point"); + } + + // a negative id flags the edge as visited for the edges(...) method. 
+ // since we're splitting connected components, we want the edges method to visit + // the newly separated component + final int visitID = -id; + Edge firstAppearance = visitedEdge.get(current.coordinate).v2(); + // correct the graph pointers by correcting the 'next' pointer for both the + // first appearance and this appearance of the edge + Edge temp = firstAppearance.next; + firstAppearance.next = current.next; + current.next = temp; + current.component = visitID; + // backtrack until we get back to this coordinate, setting the visit id to + // a non-visited value (anything positive) + do { + prev.component = visitID; + prev = visitedEdge.get(prev.coordinate).v1(); + ++splitIndex; + } while (!current.coordinate.equals(prev.coordinate)); + ++connectedComponents; + } else { + visitedEdge.put(current.coordinate, new Tuple<>(prev, current)); + } + edges.add(current); + prev = current; + } + length++; + } while (connectedComponents == 0 && (current = current.next) != edge); + + return (splitIndex != 1) ? length - splitIndex : length; + } + + /** + * Compute all coordinates of a component + * @param component an arbitrary edge of the component + * @param coordinates Array of coordinates to write the result to + * @return the coordinates parameter + */ + private static Point[] coordinates(Edge component, Point[] coordinates, double[] partitionPoint) { + for (int i = 0; i < coordinates.length; i++) { + coordinates[i] = (component = component.next).coordinate; + } + // First and last coordinates must be equal + if (coordinates[0].equals(coordinates[coordinates.length - 1]) == false) { + if (Double.isNaN(partitionPoint[2])) { + throw new InvalidShapeException("Self-intersection at or near point [" + + partitionPoint[0] + "," + partitionPoint[1] + "]"); + } else { + throw new InvalidShapeException("Self-intersection at or near point [" + + partitionPoint[0] + "," + partitionPoint[1] + "," + partitionPoint[2] + "]"); + } + } + return coordinates; + } + + private static List buildPoints(List> components) { + List result = new ArrayList<>(components.size()); + for (int i = 0; i < components.size(); i++) { + List component = components.get(i); + result.add(buildPolygon(component)); + } + return result; + } + + private static Polygon buildPolygon(List polygon) { + List holes; + Point[] shell = polygon.get(0); + if (polygon.size() > 1) { + holes = new ArrayList<>(polygon.size() - 1); + for (int i = 1; i < polygon.size(); ++i) { + Point[] coords = polygon.get(i); + // We do not have holes on the dateline as they get eliminated + // when breaking the polygon around it. + double[] x = new double[coords.length]; + double[] y = new double[coords.length]; + for (int c = 0; c < coords.length; ++c) { + x[c] = normalizeLon(coords[c].getLon()); + y[c] = normalizeLat(coords[c].getLat()); + } + holes.add(new org.elasticsearch.geo.geometry.LinearRing(y, x)); + } + } else { + holes = Collections.emptyList(); + } + + double[] x = new double[shell.length]; + double[] y = new double[shell.length]; + for (int i = 0; i < shell.length; ++i) { + // The Lucene Tessellator treats +180 and -180 differently, so we should keep the sign. + // The normalizeLon method excludes -180. + x[i] = Math.abs(shell[i].getLon()) > 180 ? 
normalizeLon(shell[i].getLon()) : shell[i].getLon(); + y[i] = normalizeLat(shell[i].getLat()); + } + + return new Polygon(new LinearRing(y, x), holes); + } + + private static Point[][] holes(Edge[] holes, int numHoles) { + if (numHoles == 0) { + return new Point[0][]; + } + final Point[][] points = new Point[numHoles][]; + + for (int i = 0; i < numHoles; i++) { + double[] partitionPoint = new double[3]; + int length = component(holes[i], -(i+1), null, partitionPoint); // mark as visited by inverting the sign + points[i] = coordinates(holes[i], new Point[length+1], partitionPoint); + } + + return points; + } +} diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeometryParser.java b/server/src/main/java/org/elasticsearch/common/geo/GeometryParser.java index b96e41df5e472..1cbfe0c5c4397 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeometryParser.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeometryParser.java @@ -24,7 +24,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.geo.geometry.Geometry; -import org.elasticsearch.geo.utils.GeographyValidator; +import org.elasticsearch.geo.utils.StandardValidator; import org.elasticsearch.geo.utils.GeometryValidator; import org.elasticsearch.geo.utils.WellKnownText; @@ -38,10 +38,9 @@ public final class GeometryParser { private final GeoJson geoJsonParser; private final WellKnownText wellKnownTextParser; - private final GeometryValidator validator; public GeometryParser(boolean rightOrientation, boolean coerce, boolean ignoreZValue) { - validator = new GeographyValidator(ignoreZValue); + GeometryValidator validator = new StandardValidator(ignoreZValue); geoJsonParser = new GeoJson(rightOrientation, coerce, validator); wellKnownTextParser = new WellKnownText(coerce, validator); } diff --git a/server/src/main/java/org/elasticsearch/common/geo/ShapeRelation.java b/server/src/main/java/org/elasticsearch/common/geo/ShapeRelation.java index e2e177c8f0fd2..23ba2f3ef6980 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/ShapeRelation.java +++ b/server/src/main/java/org/elasticsearch/common/geo/ShapeRelation.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo; -import org.apache.lucene.document.LatLonShape.QueryRelation; +import org.apache.lucene.document.ShapeField.QueryRelation; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; diff --git a/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java b/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java index 9299edc459cb7..4a976d19b2347 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java +++ b/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java @@ -20,18 +20,16 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContent; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.json.JsonXContent; +import 
org.elasticsearch.common.xcontent.support.MapXContentParser; import org.elasticsearch.index.mapper.BaseGeoShapeFieldMapper; import java.io.IOException; -import java.io.InputStream; +import java.util.Collections; /** * first point of entry for a shape parser @@ -75,14 +73,8 @@ static ShapeBuilder parse(XContentParser parser) throws IOException { } static ShapeBuilder parse(Object value) throws IOException { - XContentBuilder content = JsonXContent.contentBuilder(); - content.startObject(); - content.field("value", value); - content.endObject(); - - try (InputStream stream = BytesReference.bytes(content).streamInput(); - XContentParser parser = JsonXContent.jsonXContent.createParser( - NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { + try (XContentParser parser = new MapXContentParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, + Collections.singletonMap("value", value), null)) { parser.nextToken(); // start object parser.nextToken(); // field name parser.nextToken(); // field value diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index 2e213cccf1306..85e6a866016e8 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -75,7 +75,6 @@ import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.function.IntFunction; -import java.util.function.Supplier; import static org.elasticsearch.ElasticsearchException.readStackTrace; @@ -392,20 +391,35 @@ public Integer readOptionalVInt() throws IOException { return null; } - // we don't use a CharsRefBuilder since we exactly know the size of the character array up front + // Maximum char-count to de-serialize via the thread-local CharsRef buffer + private static final int SMALL_STRING_LIMIT = 1024; + + // Thread-local buffer for smaller strings + private static final ThreadLocal smallSpare = ThreadLocal.withInitial(() -> new CharsRef(SMALL_STRING_LIMIT)); + + // Larger buffer used for long strings that can't fit into the thread-local buffer + // We don't use a CharsRefBuilder since we exactly know the size of the character array up front // this prevents calling grow for every character since we don't need this - private final CharsRef spare = new CharsRef(); + private CharsRef largeSpare; public String readString() throws IOException { // TODO it would be nice to not call readByte() for every character but we don't know how much to read up-front // we can make the loop much more complicated but that won't buy us much compared to the bounds checks in readByte() final int charCount = readArraySize(); - if (spare.chars.length < charCount) { - // we don't use ArrayUtils.grow since there is no need to copy the array - spare.chars = new char[ArrayUtil.oversize(charCount, Character.BYTES)]; + final CharsRef charsRef; + if (charCount > SMALL_STRING_LIMIT) { + if (largeSpare == null) { + largeSpare = new CharsRef(ArrayUtil.oversize(charCount, Character.BYTES)); + } else if (largeSpare.chars.length < charCount) { + // we don't use ArrayUtils.grow since there is no need to copy the array + largeSpare.chars = new char[ArrayUtil.oversize(charCount, Character.BYTES)]; + } + charsRef = largeSpare; + } else { + charsRef = smallSpare.get(); } - spare.length = charCount; - final char[] buffer = spare.chars; + charsRef.length = charCount; + final char[] buffer = charsRef.chars; for (int 
i = 0; i < charCount; i++) { final int c = readByte() & 0xff; switch (c >> 4) { @@ -430,7 +444,7 @@ public String readString() throws IOException { throw new IOException("Invalid string; unexpected character: " + c + " hex: " + Integer.toHexString(c)); } } - return spare.toString(); + return charsRef.toString(); } public SecureString readSecureString() throws IOException { @@ -516,8 +530,14 @@ public String[] readOptionalStringArray() throws IOException { return null; } + /** + * If the returned map contains any entries it will be mutable. If it is empty it might be immutable. + */ public Map readMap(Writeable.Reader keyReader, Writeable.Reader valueReader) throws IOException { int size = readArraySize(); + if (size == 0) { + return Collections.emptyMap(); + } Map map = new HashMap<>(size); for (int i = 0; i < size; i++) { K key = keyReader.read(this); @@ -532,6 +552,8 @@ public Map readMap(Writeable.Reader keyReader, Writeable.Reader< *

      * Map<String, List<String>> map = in.readMapOfLists(StreamInput::readString, StreamInput::readString);
      * 
+ * If the map or a list in it contains any elements it will be mutable, otherwise either the empty map or empty lists it contains + * might be immutable. * * @param keyReader The key reader * @param valueReader The value reader @@ -550,12 +572,19 @@ public Map> readMapOfLists(final Writeable.Reader keyReader return map; } + /** + * If the returned map contains any entries it will be mutable. If it is empty it might be immutable. + */ @Nullable @SuppressWarnings("unchecked") public Map readMap() throws IOException { return (Map) readGenericValue(); } + /** + * Reads a value of unspecified type. If a collection is read then the collection will be mutable if it contains any entry but might + * be immutable if it is empty. + */ @Nullable public Object readGenericValue() throws IOException { byte type = readByte(); @@ -635,6 +664,9 @@ public final Instant readOptionalInstant() throws IOException { @SuppressWarnings("unchecked") private List readArrayList() throws IOException { int size = readArraySize(); + if (size == 0) { + return Collections.emptyList(); + } List list = new ArrayList(size); for (int i = 0; i < size; i++) { list.add(readGenericValue()); @@ -652,8 +684,13 @@ private ZonedDateTime readZonedDateTime() throws IOException { return ZonedDateTime.ofInstant(Instant.ofEpochMilli(readLong()), ZoneId.of(timeZoneId)); } + private static final Object[] EMPTY_OBJECT_ARRAY = new Object[0]; + private Object[] readArray() throws IOException { int size8 = readArraySize(); + if (size8 == 0) { + return EMPTY_OBJECT_ARRAY; + } Object[] list8 = new Object[size8]; for (int i = 0; i < size8; i++) { list8[i] = readGenericValue(); @@ -663,6 +700,9 @@ private Object[] readArray() throws IOException { private Map readLinkedHashMap() throws IOException { int size9 = readArraySize(); + if (size9 == 0) { + return Collections.emptyMap(); + } Map map9 = new LinkedHashMap(size9); for (int i = 0; i < size9; i++) { map9.put(readString(), readGenericValue()); @@ -672,6 +712,9 @@ private Map readLinkedHashMap() throws IOException { private Map readHashMap() throws IOException { int size10 = readArraySize(); + if (size10 == 0) { + return Collections.emptyMap(); + } Map map10 = new HashMap(size10); for (int i = 0; i < size10; i++) { map10.put(readString(), readGenericValue()); @@ -724,8 +767,13 @@ public ZoneId readOptionalZoneId() throws IOException { return null; } + private static final int[] EMPTY_INT_ARRAY = new int[0]; + public int[] readIntArray() throws IOException { int length = readArraySize(); + if (length == 0) { + return EMPTY_INT_ARRAY; + } int[] values = new int[length]; for (int i = 0; i < length; i++) { values[i] = readInt(); @@ -735,6 +783,9 @@ public int[] readIntArray() throws IOException { public int[] readVIntArray() throws IOException { int length = readArraySize(); + if (length == 0) { + return EMPTY_INT_ARRAY; + } int[] values = new int[length]; for (int i = 0; i < length; i++) { values[i] = readVInt(); @@ -742,8 +793,13 @@ public int[] readVIntArray() throws IOException { return values; } + private static final long[] EMPTY_LONG_ARRAY = new long[0]; + public long[] readLongArray() throws IOException { int length = readArraySize(); + if (length == 0) { + return EMPTY_LONG_ARRAY; + } long[] values = new long[length]; for (int i = 0; i < length; i++) { values[i] = readLong(); @@ -753,6 +809,9 @@ public long[] readLongArray() throws IOException { public long[] readVLongArray() throws IOException { int length = readArraySize(); + if (length == 0) { + return EMPTY_LONG_ARRAY; + } long[] values = 
new long[length]; for (int i = 0; i < length; i++) { values[i] = readVLong(); @@ -760,8 +819,13 @@ public long[] readVLongArray() throws IOException { return values; } + private static final float[] EMPTY_FLOAT_ARRAY = new float[0]; + public float[] readFloatArray() throws IOException { int length = readArraySize(); + if (length == 0) { + return EMPTY_FLOAT_ARRAY; + } float[] values = new float[length]; for (int i = 0; i < length; i++) { values[i] = readFloat(); @@ -769,8 +833,13 @@ public float[] readFloatArray() throws IOException { return values; } + private static final double[] EMPTY_DOUBLE_ARRAY = new double[0]; + public double[] readDoubleArray() throws IOException { int length = readArraySize(); + if (length == 0) { + return EMPTY_DOUBLE_ARRAY; + } double[] values = new double[length]; for (int i = 0; i < length; i++) { values[i] = readDouble(); @@ -778,8 +847,13 @@ public double[] readDoubleArray() throws IOException { return values; } + private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; + public byte[] readByteArray() throws IOException { final int length = readArraySize(); + if (length == 0) { + return EMPTY_BYTE_ARRAY; + } final byte[] bytes = new byte[length]; readBytes(bytes, 0, bytes.length); return bytes; @@ -810,20 +884,6 @@ public T[] readOptionalArray(Writeable.Reader reader, IntFunction ar return readBoolean() ? readArray(reader, arraySupplier) : null; } - /** - * Serializes a potential null value. - */ - @Nullable - public T readOptionalStreamable(Supplier supplier) throws IOException { - if (readBoolean()) { - T streamable = supplier.get(); - streamable.readFrom(this); - return streamable; - } else { - return null; - } - } - @Nullable public T readOptionalWriteable(Writeable.Reader reader) throws IOException { if (readBoolean()) { @@ -977,41 +1037,19 @@ public C readOptionalNamedWriteable(Class category } /** - * Read a {@link List} of {@link Streamable} objects, using the {@code constructor} to instantiate each instance. - *

- * This is expected to take the form: - * - * List<MyStreamableClass> list = in.readStreamList(MyStreamableClass::new); - * - * - * @param constructor Streamable instance creator - * @return Never {@code null}. - * @throws IOException if any step fails - */ - public List readStreamableList(Supplier constructor) throws IOException { - int count = readArraySize(); - List builder = new ArrayList<>(count); - for (int i=0; i List readList(final Writeable.Reader reader) throws IOException { - return readCollection(reader, ArrayList::new); + return readCollection(reader, ArrayList::new, Collections.emptyList()); } /** * Reads a list of strings. The list is expected to have been written using {@link StreamOutput#writeStringCollection(Collection)}. + * If the returned list contains any entries it will be mutable. If it is empty it might be immutable. * * @return the list of strings * @throws IOException if an I/O exception occurs reading the list @@ -1021,18 +1059,22 @@ public List readStringList() throws IOException { } /** - * Reads a set of objects + * Reads a set of objects. If the returned set contains any entries it will be mutable. If it is empty it might be immutable. */ public Set readSet(Writeable.Reader reader) throws IOException { - return readCollection(reader, HashSet::new); + return readCollection(reader, HashSet::new, Collections.emptySet()); } /** * Reads a collection of objects */ private > C readCollection(Writeable.Reader reader, - IntFunction constructor) throws IOException { + IntFunction constructor, + C empty) throws IOException { int count = readArraySize(); + if (count == 0) { + return empty; + } C builder = constructor.apply(count); for (int i=0; i> C readCollection(Writeable.Reader List readNamedWriteableList(Class categoryClass) throws IOException { int count = readArraySize(); + if (count == 0) { + return Collections.emptyList(); + } List builder = new ArrayList<>(count); for (int i=0; i features = Collections.emptySet(); /** * The version of the node on the other side of this stream. @@ -126,27 +121,6 @@ public void setVersion(Version version) { this.version = version; } - /** - * Test if the stream has the specified feature. Features are used when serializing {@link ClusterState.Custom} or - * {@link MetaData.Custom}; see also {@link ClusterState.FeatureAware}. - * - * @param feature the feature to test - * @return true if the stream has the specified feature - */ - public boolean hasFeature(final String feature) { - return this.features.contains(feature); - } - - /** - * Set the features on the stream. See {@link StreamOutput#hasFeature(String)}. - * - * @param features the features on the stream - */ - public void setFeatures(final Set features) { - assert this.features.isEmpty() : this.features; - this.features = Set.copyOf(features); - } - public long position() throws IOException { throw new UnsupportedOperationException(); } @@ -232,19 +206,25 @@ public void writeBytesRef(BytesRef bytes) throws IOException { write(bytes.bytes, bytes.offset, bytes.length); } + private static final ThreadLocal scratch = ThreadLocal.withInitial(() -> new byte[1024]); + public final void writeShort(short v) throws IOException { - writeByte((byte) (v >> 8)); - writeByte((byte) v); + final byte[] buffer = scratch.get(); + buffer[0] = (byte) (v >> 8); + buffer[1] = (byte) v; + writeBytes(buffer, 0, 2); } /** * Writes an int as four bytes. 
*/ public void writeInt(int i) throws IOException { - writeByte((byte) (i >> 24)); - writeByte((byte) (i >> 16)); - writeByte((byte) (i >> 8)); - writeByte((byte) i); + final byte[] buffer = scratch.get(); + buffer[0] = (byte) (i >> 24); + buffer[1] = (byte) (i >> 16); + buffer[2] = (byte) (i >> 8); + buffer[3] = (byte) i; + writeBytes(buffer, 0, 4); } /** @@ -254,19 +234,30 @@ public void writeInt(int i) throws IOException { * using {@link #writeInt} */ public void writeVInt(int i) throws IOException { + final byte[] buffer = scratch.get(); + int index = 0; while ((i & ~0x7F) != 0) { - writeByte((byte) ((i & 0x7f) | 0x80)); + buffer[index++] = ((byte) ((i & 0x7f) | 0x80)); i >>>= 7; } - writeByte((byte) i); + buffer[index++] = ((byte) i); + writeBytes(buffer, 0, index); } /** * Writes a long as eight bytes. */ public void writeLong(long i) throws IOException { - writeInt((int) (i >> 32)); - writeInt((int) i); + final byte[] buffer = scratch.get(); + buffer[0] = (byte) (i >> 56); + buffer[1] = (byte) (i >> 48); + buffer[2] = (byte) (i >> 40); + buffer[3] = (byte) (i >> 32); + buffer[4] = (byte) (i >> 24); + buffer[5] = (byte) (i >> 16); + buffer[6] = (byte) (i >> 8); + buffer[7] = (byte) i; + writeBytes(buffer, 0, 8); } /** @@ -286,11 +277,14 @@ public void writeVLong(long i) throws IOException { * {@link #writeVLong(long)} instead. */ void writeVLongNoCheck(long i) throws IOException { + final byte[] buffer = scratch.get(); + int index = 0; while ((i & ~0x7F) != 0) { - writeByte((byte) ((i & 0x7f) | 0x80)); + buffer[index++] = ((byte) ((i & 0x7f) | 0x80)); i >>>= 7; } - writeByte((byte) i); + buffer[index++] = ((byte) i); + writeBytes(buffer, 0, index); } /** @@ -301,13 +295,16 @@ void writeVLongNoCheck(long i) throws IOException { * If the numbers are known to be non-negative, use {@link #writeVLong(long)} */ public void writeZLong(long i) throws IOException { + final byte[] buffer = scratch.get(); + int index = 0; // zig-zag encoding cf. https://developers.google.com/protocol-buffers/docs/encoding?hl=en long value = BitUtil.zigZagEncode(i); while ((value & 0xFFFFFFFFFFFFFF80L) != 0L) { - writeByte((byte)((value & 0x7F) | 0x80)); + buffer[index++] = (byte) ((value & 0x7F) | 0x80); value >>>= 7; } - writeByte((byte) (value & 0x7F)); + buffer[index++] = (byte) (value & 0x7F); + writeBytes(buffer, 0, index); } public void writeOptionalLong(@Nullable Long l) throws IOException { @@ -394,18 +391,9 @@ public void writeText(Text text) throws IOException { } } - // we use a small buffer to convert strings to bytes since we want to prevent calling writeByte - // for every byte in the string (see #21660 for details). - // This buffer will never be the oversized limit of 1024 bytes and will not be shared across streams - private byte[] convertStringBuffer = BytesRef.EMPTY_BYTES; // TODO should we reduce it to 0 bytes once the stream is closed? 
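The writeShort/writeInt/writeLong/writeVInt rewrites above share one idea: pack the bytes into a small per-thread scratch buffer, then hand them to the sink in a single bulk write instead of one writeByte call per byte. A rough, self-contained sketch of the technique, with ByteArrayOutputStream standing in for the abstract writeBytes sink:

import java.io.ByteArrayOutputStream;

final class BatchedWriter {
    private final ByteArrayOutputStream out = new ByteArrayOutputStream();

    // One small buffer per thread; 8 bytes covers the widest fixed-size primitive here.
    private static final ThreadLocal<byte[]> SCRATCH = ThreadLocal.withInitial(() -> new byte[8]);

    // Pack all eight bytes first, then issue a single bulk write.
    void writeLong(long i) {
        final byte[] buffer = SCRATCH.get();
        for (int b = 0; b < 8; b++) {
            buffer[b] = (byte) (i >> (56 - 8 * b));
        }
        out.write(buffer, 0, 8);
    }

    // Variable-length encoding: 7 data bits per byte, high bit marks a continuation.
    void writeVInt(int i) {
        final byte[] buffer = SCRATCH.get();
        int index = 0;
        while ((i & ~0x7F) != 0) {
            buffer[index++] = (byte) ((i & 0x7F) | 0x80);
            i >>>= 7;
        }
        buffer[index++] = (byte) i;
        out.write(buffer, 0, index);
    }
}

The real scratch buffer is sized at 1024 bytes so that writeString below can reuse it for UTF-8 encoding, and writeZLong zig-zag-encodes the value first so that small negative numbers still encode in few bytes.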
- public void writeString(String str) throws IOException { final int charCount = str.length(); - final int bufferSize = Math.min(3 * charCount, 1024); // at most 3 bytes per character is needed here - if (convertStringBuffer.length < bufferSize) { // we don't use ArrayUtils.grow since copying the bytes is unnecessary - convertStringBuffer = new byte[ArrayUtil.oversize(bufferSize, Byte.BYTES)]; - } - byte[] buffer = convertStringBuffer; + byte[] buffer = scratch.get(); int offset = 0; writeVInt(charCount); for (int i = 0; i < charCount; i++) { @@ -916,18 +904,6 @@ public void writeOptionalArray(@Nullable T[] array) throws writeOptionalArray((out, value) -> value.writeTo(out), array); } - /** - * Serializes a potential null value. - */ - public void writeOptionalStreamable(@Nullable Streamable streamable) throws IOException { - if (streamable != null) { - writeBoolean(true); - streamable.writeTo(this); - } else { - writeBoolean(false); - } - } - public void writeOptionalWriteable(@Nullable Writeable writeable) throws IOException { if (writeable != null) { writeBoolean(true); @@ -1120,16 +1096,6 @@ public void writeOptionalZoneId(@Nullable ZoneId timeZone) throws IOException { } } - /** - * Writes a list of {@link Streamable} objects - */ - public void writeStreamableList(List list) throws IOException { - writeVInt(list.size()); - for (Streamable obj: list) { - obj.writeTo(this); - } - } - /** * Writes a collection to this stream. The corresponding collection can be read from a stream input using * {@link StreamInput#readList(Writeable.Reader)}. diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/Streamable.java b/server/src/main/java/org/elasticsearch/common/io/stream/Streamable.java deleted file mode 100644 index 86a4d3ed95c2f..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/io/stream/Streamable.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.io.stream; - -import java.io.IOException; -import java.util.function.Supplier; - -/** - * Implementers can be written to a {@linkplain StreamOutput} and read from a {@linkplain StreamInput}. This allows them to be "thrown - * across the wire" using Elasticsearch's internal protocol. If the implementer also implements equals and hashCode then a copy made by - * serializing and deserializing must be equal and have the same hashCode. It isn't required that such a copy be entirely unchanged. - * - * Prefer implementing {@link Writeable} over implementing this interface where possible. Lots of code depends on this interface so this - * isn't always possible. 
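Much of the rest of this diff follows the same migration recipe away from the deleted Streamable: reads move into a constructor taking the stream, so fields can be final and no half-initialized "empty" instance is ever observable. A compact sketch of the target shape, with a hypothetical Message class and java.io.DataInput/DataOutput as stand-ins for StreamInput/StreamOutput:

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

final class Message {
    private final String text; // final is possible because reading happens in a constructor

    Message(String text) {
        this.text = text;
    }

    // Replaces the old no-arg constructor plus readFrom(StreamInput) pair.
    Message(DataInput in) throws IOException {
        this.text = in.readUTF();
    }

    void writeTo(DataOutput out) throws IOException {
        out.writeUTF(text);
    }
}

Call sites then pass the constructor reference wherever a Writeable.Reader is expected, e.g. in.readList(Message::new) in place of the removed in.readStreamableList(Message::new).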
- * - * Implementers of this interface almost always declare a no arg constructor that is exclusively used for creating "empty" objects on which - * you then call {@link #readFrom(StreamInput)}. Because {@linkplain #readFrom(StreamInput)} isn't part of the constructor the fields - * on implementers cannot be final. It is these reasons that this interface has fallen out of favor compared to {@linkplain Writeable}. - */ -public interface Streamable { - /** - * Set this object's fields from a {@linkplain StreamInput}. - */ - void readFrom(StreamInput in) throws IOException; - - /** - * Write this object's fields to a {@linkplain StreamOutput}. - */ - void writeTo(StreamOutput out) throws IOException; - - static Writeable.Reader newWriteableReader(Supplier supplier) { - return (StreamInput in) -> { - T request = supplier.get(); - request.readFrom(in); - return request; - }; - } -} diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/Writeable.java b/server/src/main/java/org/elasticsearch/common/io/stream/Writeable.java index 9d645038d6528..82cc2323b1aa1 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/Writeable.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/Writeable.java @@ -25,9 +25,6 @@ * Implementers can be written to a {@linkplain StreamOutput} and read from a {@linkplain StreamInput}. This allows them to be "thrown * across the wire" using Elasticsearch's internal protocol. If the implementer also implements equals and hashCode then a copy made by * serializing and deserializing must be equal and have the same hashCode. It isn't required that such a copy be entirely unchanged. - *

- * Prefer implementing this interface over implementing {@link Streamable} where possible. Lots of code depends on {@linkplain Streamable} - * so this isn't always possible. */ public interface Writeable { diff --git a/server/src/main/java/org/elasticsearch/common/logging/DeprecatedMessage.java b/server/src/main/java/org/elasticsearch/common/logging/DeprecatedMessage.java new file mode 100644 index 0000000000000..cf31ab796c79f --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/logging/DeprecatedMessage.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.logging; + +import org.elasticsearch.common.Strings; + +import java.util.Collections; +import java.util.Map; + +/** + * A logger message used by {@link DeprecationLogger}. + * Carries x-opaque-id field if provided in the headers. Will populate the x-opaque-id field in JSON logs. + */ +public class DeprecatedMessage extends ESLogMessage { + + public DeprecatedMessage(String messagePattern, String xOpaqueId, Object... args) { + super(fieldMap(xOpaqueId), messagePattern, args); + } + + private static Map fieldMap(String xOpaqueId) { + if (Strings.isNullOrEmpty(xOpaqueId)) { + return Collections.emptyMap(); + } + + return Map.of("x-opaque-id", xOpaqueId); + } +} diff --git a/server/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java b/server/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java index d5d682f807ae3..f0deaa5b73026 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java +++ b/server/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java @@ -25,6 +25,7 @@ import org.elasticsearch.Version; import org.elasticsearch.common.SuppressLoggerChecks; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.tasks.Task; import java.nio.charset.Charset; import java.security.AccessController; @@ -133,7 +134,9 @@ protected boolean removeEldestEntry(final Map.Entry eldest) { * @param params parameters to the message */ public void deprecatedAndMaybeLog(final String key, final String msg, final Object... params) { - deprecated(THREAD_CONTEXT, msg, keys.add(key), params); + String xOpaqueId = getXOpaqueId(THREAD_CONTEXT); + boolean log = keys.add(xOpaqueId + key); + deprecated(THREAD_CONTEXT, msg, log, params); } /* @@ -223,7 +226,6 @@ void deprecated(final Set threadContexts, final String message, f void deprecated(final Set threadContexts, final String message, final boolean log, final Object... 
params) { final Iterator iterator = threadContexts.iterator(); - if (iterator.hasNext()) { final String formattedMessage = LoggerMessageFormat.format(message, params); final String warningHeaderValue = formatWarning(formattedMessage); @@ -244,13 +246,27 @@ void deprecated(final Set threadContexts, final String message, f @SuppressLoggerChecks(reason = "safely delegates to logger") @Override public Void run() { - logger.warn(message, params); + /** + * There should be only one threadContext (in prod env), @see DeprecationLogger#setThreadContext + */ + String opaqueId = getXOpaqueId(threadContexts); + + logger.warn(new DeprecatedMessage(message, opaqueId, params)); return null; } }); } } + public String getXOpaqueId(Set threadContexts) { + return threadContexts.stream() + .filter(t -> t.isClosed() == false) + .filter(t -> t.getHeader(Task.X_OPAQUE_ID) != null) + .findFirst() + .map(t -> t.getHeader(Task.X_OPAQUE_ID)) + .orElse(""); + } + /** * Format a warning string in the proper warning format by prepending a warn code, warn agent, wrapping the warning string in quotes, * and appending the RFC 7231 date. diff --git a/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java b/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java index af7cd81f202e3..425743f2b8a2a 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java +++ b/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java @@ -24,6 +24,7 @@ import org.apache.logging.log4j.core.config.Node; import org.apache.logging.log4j.core.config.plugins.Plugin; import org.apache.logging.log4j.core.config.plugins.PluginAttribute; +import org.apache.logging.log4j.core.config.plugins.PluginBuilderFactory; import org.apache.logging.log4j.core.config.plugins.PluginFactory; import org.apache.logging.log4j.core.layout.AbstractStringLayout; import org.apache.logging.log4j.core.layout.ByteBufferDestination; @@ -31,66 +32,182 @@ import org.elasticsearch.common.Strings; import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.util.LinkedHashMap; import java.util.Map; +import java.util.Set; /** * Formats log events as strings in a json format. *
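For the deprecatedAndMaybeLog change above: the deduplication key is now the concatenation of the client-supplied X-Opaque-ID and the message key, so each distinct client id sees a given warning once. A hedged sketch of that keying; a plain concurrent set stands in for the size-bounded LRU map the real logger uses:

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

final class DeprecationDedup {
    // The real logger bounds this with an LRU map; a concurrent set keeps the sketch short.
    private final Set<String> seen = ConcurrentHashMap.newKeySet();

    // Returns true the first time a given (client id, deprecation key) pair shows up.
    boolean shouldLog(String xOpaqueId, String key) {
        return seen.add(xOpaqueId + key);
    }
}

When no X-Opaque-ID header is present the id is the empty string, which degrades to the previous once-per-key behaviour.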

 * The class is wrapping the {@link PatternLayout} with a pattern to format into json. This gives more flexibility and control over how the
 * log messages are formatted in {@link org.apache.logging.log4j.core.layout.JsonLayout}
+ * There are fields which are always present in the log line:
+ * <ul>
+ * <li>type - the type of logs. These represent appenders and help docker distinguish log streams.</li>
+ * <li>timestamp - ISO8601 with additional timezone ID</li>
+ * <li>level - INFO, WARN etc</li>
+ * <li>component - logger name, most of the times class name</li>
+ * <li>cluster.name - taken from sys:es.logs.cluster_name system property because it is always set</li>
+ * <li>node.name - taken from NodeNamePatternConverter, as it can be set in runtime as hostname when not set in elasticsearch.yml</li>
+ * <li>node_and_cluster_id - in json as node.id and cluster.uuid - taken from NodeAndClusterIdConverter and present
+ * once clusterStateUpdate is first received</li>
+ * <li>message - a json escaped message. Multiline messages will be converted to single line with new line explicitly
+ * replaced to \n</li>
+ * <li>exceptionAsJson - in json as a stacktrace field. Only present when throwable is passed as a parameter when using a logger.
+ * Taken from JsonThrowablePatternConverter</li>
+ * </ul>
+ * It is possible to add more fields, or to override the defaults, with the esmessagefields layout attribute:
+ * appender.logger.layout.esmessagefields=message,took,took_millis,total_hits,types,stats,search_type,total_shards,source,id
+ * Each of these will be expanded into a json field with a value taken from an {@link ESLogMessage} field. In the example above
+ * ... "message": %ESMessageField{message}, "took": %ESMessageField{took} ...
+ * the message passed to a logger will be overridden with a value from %ESMessageField{message}
+ *
+ * The value taken from %ESMessageField{message} has to be a simple escaped JSON value and is populated in subclasses of
+ * ESLogMessage
  */
 @Plugin(name = "ESJsonLayout", category = Node.CATEGORY, elementType = Layout.ELEMENT_TYPE, printObject = true)
 public class ESJsonLayout extends AbstractStringLayout {
 
-    /**
-     * Fields used in a pattern to format a json log line:
-     * <ul>
-     * <li>type - the type of logs. These represent appenders and help docker distinguish log streams.</li>
-     * <li>timestamp - ISO8601 with additional timezone ID</li>
-     * <li>level - INFO, WARN etc</li>
-     * <li>component - logger name, most of the times class name</li>
-     * <li>cluster.name - taken from sys:es.logs.cluster_name system property because it is always set</li>
-     * <li>node.name - taken from NodeNamePatternConverter, as it can be set in runtime as hostname when not set in elasticsearch.yml</li>
-     * <li>node_and_cluster_id - in json as node.id and cluster.uuid - taken from NodeAndClusterIdConverter and present
-     * once clusterStateUpdate is first received</li>
-     * <li>message - a json escaped message. Multiline messages will be converted to single line with new line explicitly
-     * replaced to \n</li>
-     * <li>exceptionAsJson - in json as a stacktrace field. Only present when throwable is passed as a parameter when using a logger.
-     * Taken from JsonThrowablePatternConverter</li>
-     * </ul>
- */ - private static final String PATTERN = "{" + - "\"type\": \"${TYPE}\", " + - "\"timestamp\": \"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZ}\", " + - "\"level\": \"%p\", " + - "\"component\": \"%c{1.}\", " + - "\"cluster.name\": \"${sys:es.logs.cluster_name}\", " + - "\"node.name\": \"%node_name\", " + - "%notEmpty{%node_and_cluster_id, } " + - "\"message\": \"%notEmpty{%enc{%marker}{JSON} }%enc{%.-10000m}{JSON}\" " + - "%exceptionAsJson " + - "}%n"; private final PatternLayout patternLayout; - protected ESJsonLayout(String typeName, Charset charset) { + protected ESJsonLayout(String typeName, Charset charset, String[] esmessagefields) { super(charset); this.patternLayout = PatternLayout.newBuilder() - .withPattern(pattern(typeName)) + .withPattern(pattern(typeName, esmessagefields)) .withAlwaysWriteExceptions(false) .build(); } - private String pattern(String type) { + private String pattern(String type, String[] esMessageFields) { if (Strings.isEmpty(type)) { throw new IllegalArgumentException("layout parameter 'type_name' cannot be empty"); } - return PATTERN.replace("${TYPE}", type); + Map map = new LinkedHashMap<>(); + map.put("type", inQuotes(type)); + map.put("timestamp", inQuotes("%d{yyyy-MM-dd'T'HH:mm:ss,SSSZZ}")); + map.put("level", inQuotes("%p")); + map.put("component", inQuotes("%c{1.}")); + map.put("cluster.name", inQuotes("${sys:es.logs.cluster_name}")); + map.put("node.name", inQuotes("%node_name")); + map.put("message", inQuotes("%notEmpty{%enc{%marker}{JSON} }%enc{%.-10000m}{JSON}")); + + for (String key : esMessageFields) { + map.put(key, inQuotes("%ESMessageField{" + key + "}")); + } + + return createPattern(map, Set.of(esMessageFields)); + } + + + private String createPattern(Map map, Set esMessageFields) { + StringBuilder sb = new StringBuilder(); + sb.append("{"); + String separator = ""; + for (Map.Entry entry : map.entrySet()) { + + if (esMessageFields.contains(entry.getKey())) { + sb.append("%notEmpty{"); + sb.append(separator); + appendField(sb, entry); + sb.append("}"); + } else { + sb.append(separator); + appendField(sb, entry); + } + + separator = ", "; + } + sb.append(notEmpty(", %node_and_cluster_id ")); + sb.append("%exceptionAsJson "); + sb.append("}"); + sb.append(System.lineSeparator()); + + return sb.toString(); + } + + private void appendField(StringBuilder sb, Map.Entry entry) { + sb.append(jsonKey(entry.getKey())); + sb.append(entry.getValue().toString()); + } + + private String notEmpty(String value) { + return "%notEmpty{" + value + "}"; + } + + private CharSequence jsonKey(String s) { + return inQuotes(s) + ": "; + } + + private String inQuotes(String s) { + return "\"" + s + "\""; } @PluginFactory - public static ESJsonLayout createLayout(@PluginAttribute("type_name") String type, - @PluginAttribute(value = "charset", defaultString = "UTF-8") Charset charset) { - return new ESJsonLayout(type, charset); + public static ESJsonLayout createLayout(String type, + Charset charset, + String[] esmessagefields) { + return new ESJsonLayout(type, charset, esmessagefields); + } + + PatternLayout getPatternLayout() { + return patternLayout; + } + + public static class Builder> extends AbstractStringLayout.Builder + implements org.apache.logging.log4j.core.util.Builder { + + @PluginAttribute("type_name") + String type; + + @PluginAttribute(value = "charset", defaultString = "UTF-8") + Charset charset; + + @PluginAttribute("esmessagefields") + private String esMessageFields; + + public Builder() { + setCharset(StandardCharsets.UTF_8); + } + + @Override + public 
ESJsonLayout build() { + String[] split = Strings.isNullOrEmpty(esMessageFields) ? new String[]{} : esMessageFields.split(","); + return ESJsonLayout.createLayout(type, charset, split); + } + + public Charset getCharset() { + return charset; + } + + public B setCharset(final Charset charset) { + this.charset = charset; + return asBuilder(); + } + + public String getType() { + return type; + } + + public B setType(final String type) { + this.type = type; + return asBuilder(); + } + + public String getESMessageFields() { + return esMessageFields; + } + + public B setESMessageFields(String esmessagefields) { + this.esMessageFields = esmessagefields; + return asBuilder(); + } + } + + @PluginBuilderFactory + public static > B newBuilder() { + return new ESJsonLayout.Builder().asBuilder(); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/logging/ESLogMessage.java b/server/src/main/java/org/elasticsearch/common/logging/ESLogMessage.java new file mode 100644 index 0000000000000..cf19697e1c5a5 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/logging/ESLogMessage.java @@ -0,0 +1,75 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.logging; + +import com.fasterxml.jackson.core.io.JsonStringEncoder; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.common.SuppressLoggerChecks; + +import java.nio.charset.Charset; +import java.util.Map; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * A base class for custom log4j logger messages. Carries additional fields which will populate JSON fields in logs. + */ +public abstract class ESLogMessage extends ParameterizedMessage { + private static final JsonStringEncoder JSON_STRING_ENCODER = JsonStringEncoder.getInstance(); + private final Map fields; + + /** + * This is an abstract class, so this is safe. The check is done on DeprecationMessage. + * Other subclasses are not allowing varargs + */ + @SuppressLoggerChecks(reason = "Safe as this is abstract class") + public ESLogMessage(Map fields, String messagePattern, Object... args) { + super(messagePattern, args); + this.fields = fields; + } + + public static String escapeJson(String text) { + byte[] sourceEscaped = JSON_STRING_ENCODER.quoteAsUTF8(text); + return new String(sourceEscaped, Charset.defaultCharset()); + } + + public String getValueFor(String key) { + Object value = fields.get(key); + return value!=null ? 
value.toString() : null;
+    }
+
+    public static String inQuotes(String s) {
+        if (s == null)
+            return inQuotes("");
+        return "\"" + s + "\"";
+    }
+
+    public static String inQuotes(Object s) {
+        if (s == null)
+            return inQuotes("");
+        return inQuotes(s.toString());
+    }
+
+    public static String asJsonArray(Stream<String> stream) {
+        return "[" + stream
+            .map(ESLogMessage::inQuotes)
+            .collect(Collectors.joining(", ")) + "]";
+    }
+}
diff --git a/server/src/main/java/org/elasticsearch/common/logging/ESMessageFieldConverter.java b/server/src/main/java/org/elasticsearch/common/logging/ESMessageFieldConverter.java
new file mode 100644
index 0000000000000..40d702bc76bc5
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/common/logging/ESMessageFieldConverter.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.logging;
+
+import org.apache.logging.log4j.core.LogEvent;
+import org.apache.logging.log4j.core.config.Configuration;
+import org.apache.logging.log4j.core.config.plugins.Plugin;
+import org.apache.logging.log4j.core.pattern.ConverterKeys;
+import org.apache.logging.log4j.core.pattern.LogEventPatternConverter;
+import org.apache.logging.log4j.core.pattern.PatternConverter;
+import org.apache.logging.log4j.util.StringBuilders;
+import org.elasticsearch.common.Strings;
+
+/**
+ * Pattern converter to populate ESMessageField in a pattern.
+ * It will only populate these fields if the event has a message of type ESLogMessage.
+ */
+@Plugin(category = PatternConverter.CATEGORY, name = "ESMessageField")
+@ConverterKeys({"ESMessageField"})
+public final class ESMessageFieldConverter extends LogEventPatternConverter {
+
+    private String key;
+
+    /**
+     * Called by log4j2 to initialize this converter.
+ */ + public static ESMessageFieldConverter newInstance(final Configuration config, final String[] options) { + final String key = options[0]; + + return new ESMessageFieldConverter(key); + } + + public ESMessageFieldConverter(String key) { + super("ESMessageField", "ESMessageField"); + this.key = key; + } + + @Override + public void format(LogEvent event, StringBuilder toAppendTo) { + if (event.getMessage() instanceof ESLogMessage) { + ESLogMessage logMessage = (ESLogMessage) event.getMessage(); + final String value = logMessage.getValueFor(key); + if (Strings.isNullOrEmpty(value) == false) { + StringBuilders.appendValue(toAppendTo, value); + return; + } + } + StringBuilders.appendValue(toAppendTo, ""); + } +} diff --git a/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java b/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java index 58c9cbc520456..094ce5349b862 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java +++ b/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java @@ -19,8 +19,8 @@ package org.elasticsearch.common.settings; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Binder; import org.elasticsearch.common.inject.Module; @@ -90,12 +90,7 @@ public SettingsModule( } this.indexScopedSettings = new IndexScopedSettings(settings, new HashSet<>(this.indexSettings.values())); this.clusterSettings = new ClusterSettings(settings, new HashSet<>(this.nodeSettings.values()), clusterSettingUpgraders); - Settings indexSettings = settings.filter((s) -> (s.startsWith("index.") && - // special case - we want to get Did you mean indices.query.bool.max_clause_count - // which means we need to by-pass this check for this setting - // TODO remove in 6.0!! 
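Taken together, the three new logging classes cooperate like this: a logger receives an ESLogMessage subclass carrying a map of extra fields, ESJsonLayout expands every name listed in esmessagefields into a %ESMessageField{name} pattern, and ESMessageFieldConverter resolves that name against the message when the event is rendered. A minimal, hypothetical subclass in the style of the slow-log messages further down this diff (the field map's generic type is assumed to be Map<String, Object>, which this listing has stripped):

import java.util.Map;

import org.elasticsearch.common.logging.ESLogMessage;

// Hypothetical message type for illustration; IndexingSlowLogMessage below is a real one.
class TookMessage extends ESLogMessage {
    TookMessage(String what, long tookMillis) {
        // "took_millis" becomes a JSON field when listed in the layout's esmessagefields.
        super(Map.<String, Object>of("took_millis", tookMillis), "{} took {}ms", what, tookMillis);
    }
}

A layout configured with esmessagefields=took_millis would then render the took_millis value for these messages and an empty value for any other message, per ESMessageFieldConverter.format above.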
- "index.query.bool.max_clause_count".equals(s) == false) - && clusterSettings.get(s) == null); + Settings indexSettings = settings.filter((s) -> s.startsWith("index.") && clusterSettings.get(s) == null); if (indexSettings.isEmpty() == false) { try { String separator = IntStream.range(0, 85).mapToObj(s -> "*").collect(Collectors.joining("")).trim(); diff --git a/server/src/main/java/org/elasticsearch/common/transport/BoundTransportAddress.java b/server/src/main/java/org/elasticsearch/common/transport/BoundTransportAddress.java index 336b9c536a1ee..13e0f2b3a7a24 100644 --- a/server/src/main/java/org/elasticsearch/common/transport/BoundTransportAddress.java +++ b/server/src/main/java/org/elasticsearch/common/transport/BoundTransportAddress.java @@ -21,7 +21,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import java.io.IOException; @@ -32,13 +32,19 @@ * * */ -public class BoundTransportAddress implements Streamable { +public class BoundTransportAddress implements Writeable { private TransportAddress[] boundAddresses; private TransportAddress publishAddress; - BoundTransportAddress() { + public BoundTransportAddress(StreamInput in) throws IOException { + int boundAddressLength = in.readInt(); + boundAddresses = new TransportAddress[boundAddressLength]; + for (int i = 0; i < boundAddressLength; i++) { + boundAddresses[i] = new TransportAddress(in); + } + publishAddress = new TransportAddress(in); } public BoundTransportAddress(TransportAddress[] boundAddresses, TransportAddress publishAddress) { @@ -57,22 +63,6 @@ public TransportAddress publishAddress() { return publishAddress; } - public static BoundTransportAddress readBoundTransportAddress(StreamInput in) throws IOException { - BoundTransportAddress addr = new BoundTransportAddress(); - addr.readFrom(in); - return addr; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - int boundAddressLength = in.readInt(); - boundAddresses = new TransportAddress[boundAddressLength]; - for (int i = 0; i < boundAddressLength; i++) { - boundAddresses[i] = new TransportAddress(in); - } - publishAddress = new TransportAddress(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeInt(boundAddresses.length); diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java index b52bc2b199c80..d94bf7bf410eb 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.http.HttpTransportSettings; +import org.elasticsearch.tasks.Task; import java.io.Closeable; import java.io.IOException; @@ -129,7 +130,18 @@ public void close() { */ public StoredContext stashContext() { final ThreadContextStruct context = threadLocal.get(); - threadLocal.set(null); + /** + * X-Opaque-ID should be preserved in a threadContext in order to propagate this across threads. + * This is needed so the DeprecationLogger in another thread can see the value of X-Opaque-ID provided by a user. + * Otherwise when context is stash, it should be empty. 
+         */
+        if (context.requestHeaders.containsKey(Task.X_OPAQUE_ID)) {
+            ThreadContextStruct threadContextStruct =
+                DEFAULT_CONTEXT.putHeaders(Map.of(Task.X_OPAQUE_ID, context.requestHeaders.get(Task.X_OPAQUE_ID)));
+            threadLocal.set(threadContextStruct);
+        } else {
+            threadLocal.set(null);
+        }
         return () -> {
             // If the node and thus the threadLocal get closed while this task
             // is still executing, we don't want this runnable to fail with an
@@ -243,7 +255,31 @@ public void writeTo(StreamOutput out) throws IOException {
      * Reads the headers from the stream into the current context
      */
     public void readHeaders(StreamInput in) throws IOException {
-        threadLocal.set(new ThreadContext.ThreadContextStruct(in));
+        final Map<String, String> requestHeaders = in.readMap(StreamInput::readString, StreamInput::readString);
+        final Map<String, Set<String>> responseHeaders = in.readMap(StreamInput::readString, input -> {
+            final int size = input.readVInt();
+            if (size == 0) {
+                return Collections.emptySet();
+            } else if (size == 1) {
+                return Collections.singleton(input.readString());
+            } else {
+                // use a linked hash set to preserve order
+                final LinkedHashSet<String> values = new LinkedHashSet<>(size);
+                for (int i = 0; i < size; i++) {
+                    final String value = input.readString();
+                    final boolean added = values.add(value);
+                    assert added : value;
+                }
+                return values;
+            }
+        });
+        final ThreadContextStruct struct;
+        if (requestHeaders.isEmpty() && responseHeaders.isEmpty()) {
+            struct = ThreadContextStruct.EMPTY;
+        } else {
+            struct = new ThreadContextStruct(requestHeaders, responseHeaders, Collections.emptyMap(), false);
+        }
+        threadLocal.set(struct);
     }

     /**
@@ -402,7 +438,7 @@ public boolean isSystemContext() {
     /**
      * Returns true if the context is closed, otherwise false
      */
-    boolean isClosed() {
+    public boolean isClosed() {
         return threadLocal.closed.get();
     }

@@ -417,40 +453,16 @@ default void restore() {
     }

     private static final class ThreadContextStruct {
+
+        private static final ThreadContextStruct EMPTY =
+            new ThreadContextStruct(Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), false);
+
         private final Map<String, String> requestHeaders;
         private final Map<String, Object> transientHeaders;
         private final Map<String, Set<String>> responseHeaders;
         private final boolean isSystemContext;
-        private long warningHeadersSize; //saving current warning headers' size not to recalculate the size with every new warning header

-        private ThreadContextStruct(StreamInput in) throws IOException {
-            final int numRequest = in.readVInt();
-            Map<String, String> requestHeaders = numRequest == 0 ?
Collections.emptyMap() : new HashMap<>(numRequest); - for (int i = 0; i < numRequest; i++) { - requestHeaders.put(in.readString(), in.readString()); - } - - this.requestHeaders = requestHeaders; - this.responseHeaders = in.readMap(StreamInput::readString, input -> { - final int size = input.readVInt(); - if (size == 0) { - return Collections.emptySet(); - } else if (size == 1) { - return Collections.singleton(input.readString()); - } else { - // use a linked hash set to preserve order - final LinkedHashSet values = new LinkedHashSet<>(size); - for (int i = 0; i < size; i++) { - final String value = input.readString(); - final boolean added = values.add(value); - assert added : value; - } - return values; - } - }); - this.transientHeaders = Collections.emptyMap(); - isSystemContext = false; // we never serialize this it's a transient flag - this.warningHeadersSize = 0L; - } + //saving current warning headers' size not to recalculate the size with every new warning header + private final long warningHeadersSize; private ThreadContextStruct setSystemContext() { if (isSystemContext) { diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java b/server/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java index 6e9b53a73615c..91d8ecadce1a7 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java @@ -98,7 +98,10 @@ private static void extractRawValues(List values, List part, String[] pa } public static Object extractValue(String path, Map map) { - String[] pathElements = path.split("\\."); + return extractValue(map, path.split("\\.")); + } + + public static Object extractValue(Map map, String... 
pathElements) { if (pathElements.length == 0) { return null; } diff --git a/server/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java b/server/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java index d03f6abf7d9bd..bbcbaa9ceb559 100644 --- a/server/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java +++ b/server/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java @@ -73,7 +73,6 @@ public interface Lister, N private final AtomicLong round = new AtomicLong(); private boolean closed; - @SuppressWarnings("unchecked") protected AsyncShardFetch(Logger logger, String type, ShardId shardId, Lister, T> action) { this.logger = logger; this.type = type; diff --git a/server/src/main/java/org/elasticsearch/gateway/Gateway.java b/server/src/main/java/org/elasticsearch/gateway/Gateway.java index c59d52c60be7a..e8205d2d14939 100644 --- a/server/src/main/java/org/elasticsearch/gateway/Gateway.java +++ b/server/src/main/java/org/elasticsearch/gateway/Gateway.java @@ -24,6 +24,8 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -39,17 +41,20 @@ public class Gateway { private final ClusterService clusterService; - private final TransportNodesListGatewayMetaState listGatewayMetaState; + private final NodeClient client; - public Gateway(final ClusterService clusterService, final TransportNodesListGatewayMetaState listGatewayMetaState) { + public Gateway(final ClusterService clusterService, final NodeClient client) { this.clusterService = clusterService; - this.listGatewayMetaState = listGatewayMetaState; + this.client = client; } public void performStateRecovery(final GatewayStateRecoveredListener listener) throws GatewayException { final String[] nodesIds = clusterService.state().nodes().getMasterNodes().keys().toArray(String.class); logger.trace("performing state recovery from {}", Arrays.toString(nodesIds)); - final TransportNodesListGatewayMetaState.NodesGatewayMetaState nodesState = listGatewayMetaState.list(nodesIds, null).actionGet(); + var request = new TransportNodesListGatewayMetaState.Request(nodesIds); + PlainActionFuture future = PlainActionFuture.newFuture(); + client.executeLocally(TransportNodesListGatewayMetaState.TYPE, request, future); + final TransportNodesListGatewayMetaState.NodesGatewayMetaState nodesState = future.actionGet(); final int requiredAllocation = 1; diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java b/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java index 7451f25460e0c..e18e51c76a477 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java @@ -25,17 +25,23 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.nodes.BaseNodeResponse; import org.elasticsearch.action.support.nodes.BaseNodesResponse; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RerouteService; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.ShardRouting; import 
org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision; import org.elasticsearch.cluster.routing.allocation.FailedShard; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.common.Priority; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.gateway.AsyncShardFetch.Lister; +import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards.NodeGatewayStartedShards; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData; +import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData; import java.util.List; import java.util.concurrent.ConcurrentMap; @@ -49,18 +55,16 @@ public class GatewayAllocator { private final PrimaryShardAllocator primaryShardAllocator; private final ReplicaShardAllocator replicaShardAllocator; - private final ConcurrentMap> + private final ConcurrentMap> asyncFetchStarted = ConcurrentCollections.newConcurrentMap(); - private final ConcurrentMap> + private final ConcurrentMap> asyncFetchStore = ConcurrentCollections.newConcurrentMap(); @Inject - public GatewayAllocator(RerouteService rerouteService, - TransportNodesListGatewayStartedShards startedAction, - TransportNodesListShardStoreMetaData storeAction) { + public GatewayAllocator(RerouteService rerouteService, NodeClient client) { this.rerouteService = rerouteService; - this.primaryShardAllocator = new InternalPrimaryShardAllocator(startedAction); - this.replicaShardAllocator = new InternalReplicaShardAllocator(storeAction); + this.primaryShardAllocator = new InternalPrimaryShardAllocator(client); + this.replicaShardAllocator = new InternalReplicaShardAllocator(client); } public void cleanCaches() { @@ -79,10 +83,10 @@ protected GatewayAllocator() { public int getNumberOfInFlightFetch() { int count = 0; - for (AsyncShardFetch fetch : asyncFetchStarted.values()) { + for (AsyncShardFetch fetch : asyncFetchStarted.values()) { count += fetch.getNumberOfInFlightFetches(); } - for (AsyncShardFetch fetch : asyncFetchStore.values()) { + for (AsyncShardFetch fetch : asyncFetchStore.values()) { count += fetch.getNumberOfInFlightFetches(); } return count; @@ -103,6 +107,8 @@ public void applyFailedShards(final RoutingAllocation allocation, final List extends AsyncShardFetch @Override protected void reroute(ShardId shardId, String reason) { logger.trace("{} scheduling reroute for {}", shardId, reason); - rerouteService.reroute("async_shard_fetch", ActionListener.wrap( + assert rerouteService != null; + rerouteService.reroute("async_shard_fetch", Priority.HIGH, ActionListener.wrap( r -> logger.trace("{} scheduled reroute completed for {}", shardId, reason), e -> logger.debug(new ParameterizedMessage("{} scheduled reroute failed for {}", shardId, reason), e))); } @@ -147,19 +156,20 @@ protected void reroute(ShardId shardId, String reason) { class InternalPrimaryShardAllocator extends PrimaryShardAllocator { - private final TransportNodesListGatewayStartedShards startedAction; + private final NodeClient client; - InternalPrimaryShardAllocator(TransportNodesListGatewayStartedShards startedAction) { - this.startedAction = startedAction; + InternalPrimaryShardAllocator(NodeClient client) { + this.client = client; } @Override - protected AsyncShardFetch.FetchResult - fetchData(ShardRouting shard, RoutingAllocation allocation) { 
-            AsyncShardFetch<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> fetch =
+        protected AsyncShardFetch.FetchResult<NodeGatewayStartedShards> fetchData(ShardRouting shard, RoutingAllocation allocation) {
+            // explicitly type the lister; some IDEs (Eclipse) are not able to correctly infer the function type
+            Lister<BaseNodesResponse<NodeGatewayStartedShards>, NodeGatewayStartedShards> lister = this::listStartedShards;
+            AsyncShardFetch<NodeGatewayStartedShards> fetch =
                 asyncFetchStarted.computeIfAbsent(shard.shardId(),
-                    shardId -> new InternalAsyncFetch<>(logger, "shard_started", shardId, startedAction));
-            AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> shardState =
+                    shardId -> new InternalAsyncFetch<>(logger, "shard_started", shardId, lister));
+            AsyncShardFetch.FetchResult<NodeGatewayStartedShards> shardState =
                 fetch.fetchData(allocation.nodes(), allocation.getIgnoreNodes(shard.shardId()));

             if (shardState.hasData()) {
@@ -167,23 +177,31 @@ class InternalPrimaryShardAllocator extends PrimaryShardAllocator {
             }
             return shardState;
         }
+
+        private void listStartedShards(ShardId shardId, DiscoveryNode[] nodes,
+                                       ActionListener<BaseNodesResponse<NodeGatewayStartedShards>> listener) {
+            var request = new TransportNodesListGatewayStartedShards.Request(shardId, nodes);
+            client.executeLocally(TransportNodesListGatewayStartedShards.TYPE, request,
+                ActionListener.wrap(listener::onResponse, listener::onFailure));
+        }
     }

     class InternalReplicaShardAllocator extends ReplicaShardAllocator {

-        private final TransportNodesListShardStoreMetaData storeAction;
+        private final NodeClient client;

-        InternalReplicaShardAllocator(TransportNodesListShardStoreMetaData storeAction) {
-            this.storeAction = storeAction;
+        InternalReplicaShardAllocator(NodeClient client) {
+            this.client = client;
         }

         @Override
-        protected AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData>
+        protected AsyncShardFetch.FetchResult<NodeStoreFilesMetaData>
         fetchData(ShardRouting shard, RoutingAllocation allocation) {
-            AsyncShardFetch<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> fetch =
-                asyncFetchStore.computeIfAbsent(shard.shardId(),
-                    shardId -> new InternalAsyncFetch<>(logger, "shard_store", shard.shardId(), storeAction));
-            AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> shardStores =
+            // explicitly type the lister; some IDEs (Eclipse) are not able to correctly infer the function type
+            Lister<BaseNodesResponse<NodeStoreFilesMetaData>, NodeStoreFilesMetaData> lister = this::listStoreFilesMetaData;
+            AsyncShardFetch<NodeStoreFilesMetaData> fetch = asyncFetchStore.computeIfAbsent(shard.shardId(),
+                shardId -> new InternalAsyncFetch<>(logger, "shard_store", shard.shardId(), lister));
+            AsyncShardFetch.FetchResult<NodeStoreFilesMetaData> shardStores =
                 fetch.fetchData(allocation.nodes(), allocation.getIgnoreNodes(shard.shardId()));
             if (shardStores.hasData()) {
                 shardStores.processAllocation(allocation);
@@ -191,6 +209,13 @@ class InternalReplicaShardAllocator extends ReplicaShardAllocator {
             return shardStores;
         }

+        private void listStoreFilesMetaData(ShardId shardId, DiscoveryNode[] nodes,
+                                            ActionListener<BaseNodesResponse<NodeStoreFilesMetaData>> listener) {
+            var request = new TransportNodesListShardStoreMetaData.Request(shardId, nodes);
+            client.executeLocally(TransportNodesListShardStoreMetaData.TYPE, request,
+                ActionListener.wrap(listener::onResponse, listener::onFailure));
+        }
+
         @Override
         protected boolean hasInitiatedFetching(ShardRouting shard) {
             return asyncFetchStore.get(shard.shardId()) != null;
diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayModule.java b/server/src/main/java/org/elasticsearch/gateway/GatewayModule.java
index b958e42a8b563..80eb9553bd9d5 100644
--- a/server/src/main/java/org/elasticsearch/gateway/GatewayModule.java
+++ b/server/src/main/java/org/elasticsearch/gateway/GatewayModule.java
@@ -28,8 +28,6 @@ public class GatewayModule extends AbstractModule {
     protected void configure() {
         bind(DanglingIndicesState.class).asEagerSingleton();
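The common thread in the Gateway, GatewayAllocator and GatewayService changes is replacing Guice-injected transport action singletons (and their bind(...) registrations here in GatewayModule) with NodeClient.executeLocally plus an ActionType constant. Where a blocking call is still wanted, PlainActionFuture bridges the listener-style API, since it is both an ActionListener and a Future. A small sketch of that bridge with a hypothetical async method (fetchGreeting is invented for illustration):

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.PlainActionFuture;

final class ListenerBridge {
    // Hypothetical async API in the ActionListener style used throughout this PR.
    static void fetchGreeting(ActionListener<String> listener) {
        listener.onResponse("hello");
    }

    static String fetchGreetingBlocking() {
        // PlainActionFuture is both an ActionListener and a Future: pass it as the
        // callback, then block on the result.
        PlainActionFuture<String> future = PlainActionFuture.newFuture();
        fetchGreeting(future);
        return future.actionGet();
    }
}

Gateway.performStateRecovery above uses exactly this shape with TransportNodesListGatewayMetaState.TYPE.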
bind(GatewayService.class).asEagerSingleton(); - bind(TransportNodesListGatewayMetaState.class).asEagerSingleton(); - bind(TransportNodesListGatewayStartedShards.class).asEagerSingleton(); bind(LocalAllocateDangledIndices.class).asEagerSingleton(); } } diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java index 3e9c25847f6a7..86f05f398d1f8 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java @@ -22,6 +22,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; @@ -90,9 +91,7 @@ public class GatewayService extends AbstractLifecycleComponent implements Cluste @Inject public GatewayService(final Settings settings, final AllocationService allocationService, final ClusterService clusterService, - final ThreadPool threadPool, - final TransportNodesListGatewayMetaState listGatewayMetaState, - final Discovery discovery) { + final ThreadPool threadPool, final Discovery discovery, final NodeClient client) { this.allocationService = allocationService; this.clusterService = clusterService; this.threadPool = threadPool; @@ -121,7 +120,7 @@ public GatewayService(final Settings settings, final AllocationService allocatio recoveryRunnable = () -> clusterService.submitStateUpdateTask("local-gateway-elected-state", new RecoverStateUpdateTask()); } else { - final Gateway gateway = new Gateway(clusterService, listGatewayMetaState); + final Gateway gateway = new Gateway(clusterService, client); recoveryRunnable = () -> gateway.performStateRecovery(new GatewayRecoveryListener()); } diff --git a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java index 1c98dd38c273f..6f4a4f301aa78 100644 --- a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java +++ b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java @@ -73,7 +73,7 @@ public LocalAllocateDangledIndices(TransportService transportService, ClusterSer this.clusterService = clusterService; this.allocationService = allocationService; this.metaDataIndexUpgradeService = metaDataIndexUpgradeService; - transportService.registerRequestHandler(ACTION_NAME, AllocateDangledRequest::new, ThreadPool.Names.SAME, + transportService.registerRequestHandler(ACTION_NAME, ThreadPool.Names.SAME, AllocateDangledRequest::new, new AllocateDangledRequestHandler()); } @@ -210,17 +210,8 @@ public static class AllocateDangledRequest extends TransportRequest { DiscoveryNode fromNode; IndexMetaData[] indices; - public AllocateDangledRequest() { - } - - AllocateDangledRequest(DiscoveryNode fromNode, IndexMetaData[] indices) { - this.fromNode = fromNode; - this.indices = indices; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); + public AllocateDangledRequest(StreamInput in) throws IOException { + super(in); fromNode = new DiscoveryNode(in); indices = new IndexMetaData[in.readVInt()]; for (int i = 0; i < indices.length; i++) { @@ -228,6 +219,11 @@ public void readFrom(StreamInput in) throws 
IOException { } } + AllocateDangledRequest(DiscoveryNode fromNode, IndexMetaData[] indices) { + this.fromNode = fromNode; + this.indices = indices; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java b/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java index d06ad91dbbcf2..1168aac6b2f92 100644 --- a/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java +++ b/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java @@ -19,10 +19,9 @@ package org.elasticsearch.gateway; -import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.nodes.BaseNodeRequest; import org.elasticsearch.action.support.nodes.BaseNodeResponse; import org.elasticsearch.action.support.nodes.BaseNodesRequest; @@ -32,11 +31,9 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -50,6 +47,7 @@ public class TransportNodesListGatewayMetaState extends TransportNodesAction { public static final String ACTION_NAME = "internal:gateway/local/meta_state"; + public static final ActionType TYPE = new ActionType<>(ACTION_NAME, NodesGatewayMetaState::new); private final GatewayMetaState metaState; @@ -61,20 +59,14 @@ public TransportNodesListGatewayMetaState(ThreadPool threadPool, ClusterService this.metaState = metaState; } - public ActionFuture list(String[] nodesIds, @Nullable TimeValue timeout) { - PlainActionFuture future = PlainActionFuture.newFuture(); - execute(new Request(nodesIds).timeout(timeout), future); - return future; - } - @Override protected NodeRequest newNodeRequest(Request request) { return new NodeRequest(); } @Override - protected NodeGatewayMetaState newNodeResponse() { - return new NodeGatewayMetaState(); + protected NodeGatewayMetaState newNodeResponse(StreamInput in) throws IOException { + return new NodeGatewayMetaState(in); } @Override @@ -89,7 +81,8 @@ protected NodeGatewayMetaState nodeOperation(NodeRequest request, Task task) { public static class Request extends BaseNodesRequest { - public Request() { + public Request(StreamInput in) throws IOException { + super(in); } public Request(String... nodesIds) { @@ -99,29 +92,41 @@ public Request(String... 
nodesIds) { public static class NodesGatewayMetaState extends BaseNodesResponse { + public NodesGatewayMetaState(StreamInput in) throws IOException { + super(in); + } + public NodesGatewayMetaState(ClusterName clusterName, List nodes, List failures) { super(clusterName, nodes, failures); } @Override protected List readNodesFrom(StreamInput in) throws IOException { - return in.readStreamableList(NodeGatewayMetaState::new); + return in.readList(NodeGatewayMetaState::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeStreamableList(nodes); + out.writeList(nodes); } } public static class NodeRequest extends BaseNodeRequest { + NodeRequest() {} + NodeRequest(StreamInput in) throws IOException { + super(in); + } } public static class NodeGatewayMetaState extends BaseNodeResponse { private MetaData metaData; - NodeGatewayMetaState() { + public NodeGatewayMetaState(StreamInput in) throws IOException { + super(in); + if (in.readBoolean()) { + metaData = MetaData.readFrom(in); + } } public NodeGatewayMetaState(DiscoveryNode node, MetaData metaData) { @@ -133,14 +138,6 @@ public MetaData metaData() { return metaData; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - if (in.readBoolean()) { - metaData = MetaData.readFrom(in); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java b/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java index be33bb031c32c..ca68dfc9c1ccc 100644 --- a/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java +++ b/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java @@ -21,7 +21,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.nodes.BaseNodeRequest; @@ -62,12 +62,11 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction - implements - AsyncShardFetch.Lister { public static final String ACTION_NAME = "internal:gateway/local/started_shards"; + public static final ActionType TYPE = new ActionType<>(ACTION_NAME, NodesGatewayStartedShards::new); + private final Settings settings; private final NodeEnvironment nodeEnv; private final IndicesService indicesService; @@ -86,20 +85,14 @@ public TransportNodesListGatewayStartedShards(Settings settings, ThreadPool thre this.namedXContentRegistry = namedXContentRegistry; } - @Override - public void list(ShardId shardId, DiscoveryNode[] nodes, - ActionListener listener) { - execute(new Request(shardId, nodes), listener); - } - @Override protected NodeRequest newNodeRequest(Request request) { return new NodeRequest(request); } @Override - protected NodeGatewayStartedShards newNodeResponse() { - return new NodeGatewayStartedShards(); + protected NodeGatewayStartedShards newNodeResponse(StreamInput in) throws IOException { + return new NodeGatewayStartedShards(in); } @Override @@ -171,7 +164,9 @@ public static class Request extends BaseNodesRequest { private ShardId shardId; - public Request() { + public Request(StreamInput in) throws IOException { + 
super(in); + shardId = new ShardId(in); } public Request(ShardId shardId, DiscoveryNode[] nodes) { @@ -184,12 +179,6 @@ public ShardId shardId() { return this.shardId; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - shardId = new ShardId(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -199,6 +188,10 @@ public void writeTo(StreamOutput out) throws IOException { public static class NodesGatewayStartedShards extends BaseNodesResponse { + public NodesGatewayStartedShards(StreamInput in) throws IOException { + super(in); + } + public NodesGatewayStartedShards(ClusterName clusterName, List nodes, List failures) { super(clusterName, nodes, failures); @@ -206,12 +199,12 @@ public NodesGatewayStartedShards(ClusterName clusterName, List readNodesFrom(StreamInput in) throws IOException { - return in.readStreamableList(NodeGatewayStartedShards::new); + return in.readList(NodeGatewayStartedShards::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeStreamableList(nodes); + out.writeList(nodes); } } @@ -220,19 +213,15 @@ public static class NodeRequest extends BaseNodeRequest { private ShardId shardId; - public NodeRequest() { + public NodeRequest(StreamInput in) throws IOException { + super(in); + shardId = new ShardId(in); } public NodeRequest(Request request) { this.shardId = request.shardId(); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - shardId = new ShardId(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -250,7 +239,13 @@ public static class NodeGatewayStartedShards extends BaseNodeResponse { private boolean primary = false; private Exception storeException = null; - public NodeGatewayStartedShards() { + public NodeGatewayStartedShards(StreamInput in) throws IOException { + super(in); + allocationId = in.readOptionalString(); + primary = in.readBoolean(); + if (in.readBoolean()) { + storeException = in.readException(); + } } public NodeGatewayStartedShards(DiscoveryNode node, String allocationId, boolean primary) { @@ -276,16 +271,6 @@ public Exception storeException() { return this.storeException; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - allocationId = in.readOptionalString(); - primary = in.readBoolean(); - if (in.readBoolean()) { - storeException = in.readException(); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/http/CorsHandler.java b/server/src/main/java/org/elasticsearch/http/CorsHandler.java index 6bd0b09265dfc..3dfde32322c9a 100644 --- a/server/src/main/java/org/elasticsearch/http/CorsHandler.java +++ b/server/src/main/java/org/elasticsearch/http/CorsHandler.java @@ -68,6 +68,11 @@ public class CorsHandler { public static final String ANY_ORIGIN = "*"; + public static final String ORIGIN = "origin"; + public static final String DATE = "date"; + public static final String VARY = "vary"; + public static final String ACCESS_CONTROL_REQUEST_METHOD = "access-control-request-method"; + public static final String ACCESS_CONTROL_ALLOW_ORIGIN = "access-control-allow-origin"; private CorsHandler() { } diff --git a/server/src/main/java/org/elasticsearch/http/HttpInfo.java b/server/src/main/java/org/elasticsearch/http/HttpInfo.java index a24f508edc113..cf9e4672d6253 100644 --- 
a/server/src/main/java/org/elasticsearch/http/HttpInfo.java +++ b/server/src/main/java/org/elasticsearch/http/HttpInfo.java @@ -48,7 +48,7 @@ public class HttpInfo implements Writeable, ToXContentFragment { private final boolean cnameInPublishHost; public HttpInfo(StreamInput in) throws IOException { - this(BoundTransportAddress.readBoundTransportAddress(in), in.readLong(), CNAME_IN_PUBLISH_HOST); + this(new BoundTransportAddress(in), in.readLong(), CNAME_IN_PUBLISH_HOST); } public HttpInfo(BoundTransportAddress address, long maxContentLength) { diff --git a/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java b/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java index b4b471e220a77..faeb9d3bc26e8 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java +++ b/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java @@ -19,10 +19,12 @@ package org.elasticsearch.index; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.util.StringBuilders; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.logging.ESLogMessage; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -35,7 +37,9 @@ import java.io.IOException; import java.io.UncheckedIOException; +import java.util.HashMap; import java.util.Locale; +import java.util.Map; import java.util.concurrent.TimeUnit; public final class IndexingSlowLog implements IndexingOperationListener { @@ -149,34 +153,58 @@ public void postIndex(ShardId shardId, Engine.Index indexOperation, Engine.Index final ParsedDocument doc = indexOperation.parsedDoc(); final long tookInNanos = result.getTook(); if (indexWarnThreshold >= 0 && tookInNanos > indexWarnThreshold) { - indexLogger.warn("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); + indexLogger.warn( new IndexingSlowLogMessage(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); } else if (indexInfoThreshold >= 0 && tookInNanos > indexInfoThreshold) { - indexLogger.info("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); + indexLogger.info(new IndexingSlowLogMessage(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); } else if (indexDebugThreshold >= 0 && tookInNanos > indexDebugThreshold) { - indexLogger.debug("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); + indexLogger.debug(new IndexingSlowLogMessage(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); } else if (indexTraceThreshold >= 0 && tookInNanos > indexTraceThreshold) { - indexLogger.trace("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); + indexLogger.trace( new IndexingSlowLogMessage(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); } } } - static final class SlowLogParsedDocumentPrinter { - private final ParsedDocument doc; - private final long tookInNanos; - private final boolean reformat; - private final int maxSourceCharsToLog; - private final Index index; + static final class IndexingSlowLogMessage extends ESLogMessage { + + IndexingSlowLogMessage(Index index, ParsedDocument doc, long tookInNanos, boolean reformat, int maxSourceCharsToLog) { + 
super(prepareMap(index,doc,tookInNanos,reformat,maxSourceCharsToLog), + message(index,doc,tookInNanos,reformat,maxSourceCharsToLog)); + } + + private static Map prepareMap(Index index, ParsedDocument doc, long tookInNanos, boolean reformat, + int maxSourceCharsToLog) { + Map map = new HashMap<>(); + map.put("message", index); + map.put("took", TimeValue.timeValueNanos(tookInNanos)); + map.put("took_millis", ""+TimeUnit.NANOSECONDS.toMillis(tookInNanos)); + map.put("doc_type", doc.type()); + map.put("id", doc.id()); + map.put("routing", doc.routing()); - SlowLogParsedDocumentPrinter(Index index, ParsedDocument doc, long tookInNanos, boolean reformat, int maxSourceCharsToLog) { - this.doc = doc; - this.index = index; - this.tookInNanos = tookInNanos; - this.reformat = reformat; - this.maxSourceCharsToLog = maxSourceCharsToLog; + if (maxSourceCharsToLog == 0 || doc.source() == null || doc.source().length() == 0) { + return map; + } + try { + String source = XContentHelper.convertToJson(doc.source(), reformat, doc.getXContentType()); + String trim = Strings.cleanTruncate(source, maxSourceCharsToLog).trim(); + StringBuilder sb = new StringBuilder(trim); + StringBuilders.escapeJson(sb,0); + map.put("source", sb.toString()); + } catch (IOException e) { + StringBuilder sb = new StringBuilder("_failed_to_convert_[" + e.getMessage()+"]"); + StringBuilders.escapeJson(sb,0); + map.put("source", sb.toString()); + /* + * We choose to fail to write to the slow log and instead let this percolate up to the post index listener loop where this + * will be logged at the warn level. + */ + final String message = String.format(Locale.ROOT, "failed to convert source for slow log entry [%s]", map.toString()); + throw new UncheckedIOException(message, e); + } + return map; } - @Override - public String toString() { + private static String message(Index index, ParsedDocument doc, long tookInNanos, boolean reformat, int maxSourceCharsToLog) { StringBuilder sb = new StringBuilder(); sb.append(index).append(" "); sb.append("took[").append(TimeValue.timeValueNanos(tookInNanos)).append("], "); diff --git a/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java b/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java index a2193a1bdf73d..f3db24a89f82a 100644 --- a/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java +++ b/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java @@ -22,6 +22,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.logging.ESLogMessage; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -32,7 +33,10 @@ import org.elasticsearch.tasks.Task; import java.util.Collections; +import java.util.HashMap; +import java.util.Map; import java.util.concurrent.TimeUnit; +import java.util.stream.Stream; public final class SearchSlowLog implements SearchOperationListener { private long queryWarnThreshold; @@ -121,49 +125,74 @@ private void setLevel(SlowLogLevel level) { Loggers.setLevel(queryLogger, level.name()); Loggers.setLevel(fetchLogger, level.name()); } + @Override public void onQueryPhase(SearchContext context, long tookInNanos) { if (queryWarnThreshold >= 0 && tookInNanos > queryWarnThreshold) { - queryLogger.warn("{}", new SlowLogSearchContextPrinter(context, tookInNanos)); + queryLogger.warn(new SearchSlowLogMessage(context, tookInNanos)); } else if 
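IndexingSlowLogMessage (and SearchSlowLogMessage below) replace the old toString()-based printers with an ESLogMessage that carries both a key/value map, for structured JSON log output, and a plaintext message for the classic log format. Map values that embed user content are JSON-escaped first so the structured line stays parseable. A condensed sketch of the indexing variant, using the two-argument ESLogMessage constructor shown above; the helper shape and variable names are illustrative:

    static ESLogMessage slowLogEntry(ParsedDocument doc, long tookInNanos, String sourceJson, int maxChars) {
        Map<String, Object> fields = new HashMap<>();
        fields.put("took_millis", TimeUnit.NANOSECONDS.toMillis(tookInNanos));
        fields.put("id", doc.id());
        // truncate, then JSON-escape the source with the log4j2 StringBuilders utility
        StringBuilder sb = new StringBuilder(Strings.cleanTruncate(sourceJson, maxChars).trim());
        StringBuilders.escapeJson(sb, 0);
        fields.put("source", sb.toString());
        return new ESLogMessage(fields, doc.id() + " took[" + TimeValue.timeValueNanos(tookInNanos) + "]");
    }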
(queryInfoThreshold >= 0 && tookInNanos > queryInfoThreshold) { - queryLogger.info("{}", new SlowLogSearchContextPrinter(context, tookInNanos)); + queryLogger.info(new SearchSlowLogMessage(context, tookInNanos)); } else if (queryDebugThreshold >= 0 && tookInNanos > queryDebugThreshold) { - queryLogger.debug("{}", new SlowLogSearchContextPrinter(context, tookInNanos)); + queryLogger.debug(new SearchSlowLogMessage(context, tookInNanos)); } else if (queryTraceThreshold >= 0 && tookInNanos > queryTraceThreshold) { - queryLogger.trace("{}", new SlowLogSearchContextPrinter(context, tookInNanos)); + queryLogger.trace(new SearchSlowLogMessage(context, tookInNanos)); } } @Override public void onFetchPhase(SearchContext context, long tookInNanos) { if (fetchWarnThreshold >= 0 && tookInNanos > fetchWarnThreshold) { - fetchLogger.warn("{}", new SlowLogSearchContextPrinter(context, tookInNanos)); + fetchLogger.warn(new SearchSlowLogMessage(context, tookInNanos)); } else if (fetchInfoThreshold >= 0 && tookInNanos > fetchInfoThreshold) { - fetchLogger.info("{}", new SlowLogSearchContextPrinter(context, tookInNanos)); + fetchLogger.info(new SearchSlowLogMessage(context, tookInNanos)); } else if (fetchDebugThreshold >= 0 && tookInNanos > fetchDebugThreshold) { - fetchLogger.debug("{}", new SlowLogSearchContextPrinter(context, tookInNanos)); + fetchLogger.debug(new SearchSlowLogMessage(context, tookInNanos)); } else if (fetchTraceThreshold >= 0 && tookInNanos > fetchTraceThreshold) { - fetchLogger.trace("{}", new SlowLogSearchContextPrinter(context, tookInNanos)); + fetchLogger.trace(new SearchSlowLogMessage(context, tookInNanos)); } } - static final class SlowLogSearchContextPrinter { - private final SearchContext context; - private final long tookInNanos; + static final class SearchSlowLogMessage extends ESLogMessage { + + SearchSlowLogMessage(SearchContext context, long tookInNanos) { + super(prepareMap(context, tookInNanos), message(context, tookInNanos)); + } + + private static Map prepareMap(SearchContext context, long tookInNanos) { + Map messageFields = new HashMap<>(); + messageFields.put("message", context.indexShard().shardId()); + messageFields.put("took", TimeValue.timeValueNanos(tookInNanos)); + messageFields.put("took_millis", TimeUnit.NANOSECONDS.toMillis(tookInNanos)); + if (context.queryResult().getTotalHits() != null) { + messageFields.put("total_hits", context.queryResult().getTotalHits()); + } else { + messageFields.put("total_hits", "-1"); + } + messageFields.put("stats", escapeJson(asJsonArray( + context.groupStats() != null ? 
context.groupStats().stream() : Stream.empty()))); + messageFields.put("search_type", context.searchType()); + messageFields.put("total_shards", context.numberOfShards()); + + if (context.request().source() != null) { + String source = escapeJson(context.request().source().toString(FORMAT_PARAMS)); + + messageFields.put("source", source); + } else { + messageFields.put("source", "{}"); + } - SlowLogSearchContextPrinter(SearchContext context, long tookInNanos) { - this.context = context; - this.tookInNanos = tookInNanos; + messageFields.put("id", context.getTask().getHeader(Task.X_OPAQUE_ID)); + return messageFields; } - @Override - public String toString() { + // Message will be used in plaintext logs + private static String message(SearchContext context, long tookInNanos) { StringBuilder sb = new StringBuilder(); sb.append(context.indexShard().shardId()) - .append(" ") - .append("took[").append(TimeValue.timeValueNanos(tookInNanos)).append("], ") - .append("took_millis[").append(TimeUnit.NANOSECONDS.toMillis(tookInNanos)).append("], ") - .append("total_hits["); + .append(" ") + .append("took[").append(TimeValue.timeValueNanos(tookInNanos)).append("], ") + .append("took_millis[").append(TimeUnit.NANOSECONDS.toMillis(tookInNanos)).append("], ") + .append("total_hits["); if (context.queryResult().getTotalHits() != null) { sb.append(context.queryResult().getTotalHits()); } else { diff --git a/server/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java b/server/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java index 0c53cc323d678..d7e8696421db8 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java @@ -53,8 +53,8 @@ public NamedAnalyzer(String name, AnalyzerScope scope, Analyzer analyzer) { this.scope = scope; this.analyzer = analyzer; this.positionIncrementGap = positionIncrementGap; - if (analyzer instanceof org.elasticsearch.index.analysis.CustomAnalyzer) { - this.analysisMode = ((org.elasticsearch.index.analysis.CustomAnalyzer) analyzer).getAnalysisMode(); + if (analyzer instanceof org.elasticsearch.index.analysis.AnalyzerComponentsProvider) { + this.analysisMode = ((org.elasticsearch.index.analysis.AnalyzerComponentsProvider) analyzer).getComponents().analysisMode(); } else { this.analysisMode = AnalysisMode.ALL; } diff --git a/server/src/main/java/org/elasticsearch/index/cache/query/QueryCacheStats.java b/server/src/main/java/org/elasticsearch/index/cache/query/QueryCacheStats.java index 1b5616f1a3c14..03106e20837e1 100644 --- a/server/src/main/java/org/elasticsearch/index/cache/query/QueryCacheStats.java +++ b/server/src/main/java/org/elasticsearch/index/cache/query/QueryCacheStats.java @@ -22,7 +22,6 @@ import org.apache.lucene.search.DocIdSet; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.ToXContent; @@ -31,7 +30,7 @@ import java.io.IOException; -public class QueryCacheStats implements Streamable, Writeable, ToXContentFragment { +public class QueryCacheStats implements Writeable, ToXContentFragment { private long ramBytesUsed; private long hitCount; @@ -116,11 +115,6 @@ public long getEvictions() { return cacheCount - cacheSize; } - @Override - public void readFrom(StreamInput in) throws 
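The stats classes touched below (QueryCacheStats, RequestCacheStats, and later SegmentsStats, FieldDataStats, FlushStats, GetStats, MergeStats, RecoveryStats, RefreshStats) already implement Writeable; during the transition they implemented both interfaces and carried a placeholder readFrom that only threw. This change completes the migration by dropping the Streamable interface together with the stub, which looked like this:

    // Transitional stub, now deleted along with the Streamable interface:
    @Override
    public void readFrom(StreamInput in) throws IOException {
        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
    }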
IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeLong(ramBytesUsed); diff --git a/server/src/main/java/org/elasticsearch/index/cache/request/RequestCacheStats.java b/server/src/main/java/org/elasticsearch/index/cache/request/RequestCacheStats.java index 797aab684f90f..a7a19dda441e3 100644 --- a/server/src/main/java/org/elasticsearch/index/cache/request/RequestCacheStats.java +++ b/server/src/main/java/org/elasticsearch/index/cache/request/RequestCacheStats.java @@ -21,7 +21,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.ToXContentFragment; @@ -29,7 +28,7 @@ import java.io.IOException; -public class RequestCacheStats implements Streamable, Writeable, ToXContentFragment { +public class RequestCacheStats implements Writeable, ToXContentFragment { private long memorySize; private long evictions; @@ -80,11 +79,6 @@ public long getMissCount() { return this.missCount; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVLong(memorySize); diff --git a/server/src/main/java/org/elasticsearch/index/engine/CommitStats.java b/server/src/main/java/org/elasticsearch/index/engine/CommitStats.java index 16ee4e419efa8..22587cf6aad79 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/CommitStats.java +++ b/server/src/main/java/org/elasticsearch/index/engine/CommitStats.java @@ -21,7 +21,7 @@ import org.apache.lucene.index.SegmentInfos; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.xcontent.ToXContentFragment; @@ -35,12 +35,12 @@ import static java.util.Map.entry; /** a class the returns dynamic information with respect to the last commit point of this shard */ -public final class CommitStats implements Streamable, ToXContentFragment { +public final class CommitStats implements Writeable, ToXContentFragment { - private Map userData; - private long generation; - private String id; // lucene commit id in base 64; - private int numDocs; + private final Map userData; + private final long generation; + private final String id; // lucene commit id in base 64; + private final int numDocs; public CommitStats(SegmentInfos segmentInfos) { // clone the map to protect against concurrent changes @@ -51,11 +51,20 @@ public CommitStats(SegmentInfos segmentInfos) { numDocs = Lucene.getNumDocs(segmentInfos); } - private CommitStats() { + CommitStats(StreamInput in) throws IOException { + final int length = in.readVInt(); + final var entries = new ArrayList>(length); + for (int i = length; i > 0; i--) { + entries.add(entry(in.readString(), in.readString())); + } + userData = Maps.ofEntries(entries); + generation = in.readLong(); + id = in.readOptionalString(); + numDocs = in.readInt(); } public static CommitStats 
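CommitStats shows what constructor-based deserialization buys: all four fields become final, and the commit's user data is materialized as an immutable map while reading. The new constructor, with the element types spelled out:

    CommitStats(StreamInput in) throws IOException {
        final int length = in.readVInt();
        final var entries = new ArrayList<Map.Entry<String, String>>(length);
        for (int i = length; i > 0; i--) {
            entries.add(entry(in.readString(), in.readString()));
        }
        userData = Maps.ofEntries(entries);  // immutable copy
        generation = in.readLong();
        id = in.readOptionalString();        // lucene commit id in base 64, may be absent
        numDocs = in.readInt();
    }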
readOptionalCommitStatsFrom(StreamInput in) throws IOException { - return in.readOptionalStreamable(CommitStats::new); + return in.readOptionalWriteable(CommitStats::new); } @@ -93,19 +102,6 @@ public int getNumDocs() { return numDocs; } - @Override - public void readFrom(StreamInput in) throws IOException { - final int length = in.readVInt(); - final var entries = new ArrayList>(length); - for (int i = length; i > 0; i--) { - entries.add(entry(in.readString(), in.readString())); - } - userData = Maps.ofEntries(entries); - generation = in.readLong(); - id = in.readOptionalString(); - numDocs = in.readInt(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(userData.size()); diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index cfdb61a6964df..af0adfdedcf45 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -929,7 +929,11 @@ public IndexResult index(Index index) throws IOException { } } catch (RuntimeException | IOException e) { try { - maybeFailEngine("index", e); + if (e instanceof AlreadyClosedException == false && treatDocumentFailureAsTragicError(index)) { + failEngine("index id[" + index.id() + "] origin[" + index.origin() + "] seq#[" + index.seqNo() + "]", e); + } else { + maybeFailEngine("index id[" + index.id() + "] origin[" + index.origin() + "] seq#[" + index.seqNo() + "]", e); + } } catch (Exception inner) { e.addSuppressed(inner); } @@ -1055,7 +1059,8 @@ private IndexResult indexIntoLucene(Index index, IndexingStrategy plan) } return new IndexResult(plan.versionForIndexing, index.primaryTerm(), index.seqNo(), plan.currentNotFoundOrDeleted); } catch (Exception ex) { - if (indexWriter.getTragicException() == null) { + if (ex instanceof AlreadyClosedException == false && + indexWriter.getTragicException() == null && treatDocumentFailureAsTragicError(index) == false) { /* There is no tragic event recorded so this must be a document failure. * * The handling inside IW doesn't guarantee that an tragic / aborting exception @@ -1076,6 +1081,16 @@ private IndexResult indexIntoLucene(Index index, IndexingStrategy plan) } } + /** + * Whether we should treat any document failure as tragic error. + * If we hit any failure while processing an indexing on a replica, we should treat that error as tragic and fail the engine. + * However, we prefer to fail a request individually (instead of a shard) if we hit a document failure on the primary. + */ + private boolean treatDocumentFailureAsTragicError(Index index) { + // TODO: can we enable this all origins except primary on the leader? + return index.origin() == Operation.Origin.REPLICA; + } + /** * returns true if the indexing operation may have already be processed by this engine. * Note that it is OK to rarely return true even if this is not the case. 
However a `false` diff --git a/server/src/main/java/org/elasticsearch/index/engine/Segment.java b/server/src/main/java/org/elasticsearch/index/engine/Segment.java index b1e6d09d897f2..2ed7ebc0152d7 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Segment.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Segment.java @@ -30,7 +30,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.unit.ByteSizeValue; @@ -41,7 +41,7 @@ import java.util.Map; import java.util.Objects; -public class Segment implements Streamable { +public class Segment implements Writeable { private String name; private long generation; @@ -58,7 +58,28 @@ public class Segment implements Streamable { public Accountable ramTree = null; public Map attributes; - Segment() { + public Segment(StreamInput in) throws IOException { + name = in.readString(); + generation = Long.parseLong(name.substring(1), Character.MAX_RADIX); + committed = in.readBoolean(); + search = in.readBoolean(); + docCount = in.readInt(); + delDocCount = in.readInt(); + sizeInBytes = in.readLong(); + version = Lucene.parseVersionLenient(in.readOptionalString(), null); + compound = in.readOptionalBoolean(); + mergeId = in.readOptionalString(); + memoryInBytes = in.readLong(); + if (in.readBoolean()) { + // verbose mode + ramTree = readRamTree(in); + } + segmentSort = readSegmentSort(in); + if (in.readBoolean()) { + attributes = in.readMap(StreamInput::readString, StreamInput::readString); + } else { + attributes = null; + } } public Segment(String name) { @@ -150,37 +171,6 @@ public int hashCode() { return name != null ? 
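Stepping back to the InternalEngine hunks above: the new failure handling encodes a replica-versus-primary asymmetry. A document-level indexing failure on the primary fails only that one request, but the same failure on a replica means the replica can no longer mirror the primary's history, so the whole engine is failed. Condensed from the hunks:

    private boolean treatDocumentFailureAsTragicError(Index index) {
        // a replica must apply exactly what the primary applied;
        // a failure here cannot be recovered per-request
        return index.origin() == Operation.Origin.REPLICA;
    }

    // in the catch block of index(Index):
    if (e instanceof AlreadyClosedException == false && treatDocumentFailureAsTragicError(index)) {
        failEngine("index id[" + index.id() + "] origin[" + index.origin() + "] seq#[" + index.seqNo() + "]", e);
    } else {
        maybeFailEngine("index id[" + index.id() + "] origin[" + index.origin() + "] seq#[" + index.seqNo() + "]", e);
    }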
name.hashCode() : 0; } - public static Segment readSegment(StreamInput in) throws IOException { - Segment segment = new Segment(); - segment.readFrom(in); - return segment; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - name = in.readString(); - generation = Long.parseLong(name.substring(1), Character.MAX_RADIX); - committed = in.readBoolean(); - search = in.readBoolean(); - docCount = in.readInt(); - delDocCount = in.readInt(); - sizeInBytes = in.readLong(); - version = Lucene.parseVersionLenient(in.readOptionalString(), null); - compound = in.readOptionalBoolean(); - mergeId = in.readOptionalString(); - memoryInBytes = in.readLong(); - if (in.readBoolean()) { - // verbose mode - ramTree = readRamTree(in); - } - segmentSort = readSegmentSort(in); - if (in.readBoolean()) { - attributes = in.readMap(StreamInput::readString, StreamInput::readString); - } else { - attributes = null; - } - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); diff --git a/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java b/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java index ae78de574531f..dbca398689371 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java +++ b/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java @@ -23,7 +23,6 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.ToXContentFragment; @@ -31,7 +30,7 @@ import java.io.IOException; -public class SegmentsStats implements Streamable, Writeable, ToXContentFragment { +public class SegmentsStats implements Writeable, ToXContentFragment { private long count; private long memoryInBytes; @@ -365,11 +364,6 @@ static final class Fields { static final String DESCRIPTION = "description"; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVLong(count); diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/FieldDataStats.java b/server/src/main/java/org/elasticsearch/index/fielddata/FieldDataStats.java index c85255a8a1c12..d2cf5cda7a63e 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/FieldDataStats.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/FieldDataStats.java @@ -23,7 +23,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.ToXContentFragment; @@ -32,7 +31,7 @@ import java.io.IOException; import java.util.Objects; -public class FieldDataStats implements Streamable, Writeable, ToXContentFragment { +public class FieldDataStats implements Writeable, ToXContentFragment { private static final String FIELDDATA = "fielddata"; private static final String MEMORY_SIZE = "memory_size"; @@ -89,11 +88,6 @@ public FieldMemoryStats 
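One detail worth noting in the Segment(StreamInput) constructor above: the generation is never serialized. Lucene segment names are an underscore followed by the generation in base 36, so it is recovered from the name itself (Character.MAX_RADIX is 36). For example:

    // "_c" is the Lucene segment name for generation 12
    long generation = Long.parseLong("_c".substring(1), Character.MAX_RADIX);
    assert generation == 12;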
getFields() { return fields; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVLong(memorySize); diff --git a/server/src/main/java/org/elasticsearch/index/flush/FlushStats.java b/server/src/main/java/org/elasticsearch/index/flush/FlushStats.java index 92887f4fb2e69..da4926abb9fa6 100644 --- a/server/src/main/java/org/elasticsearch/index/flush/FlushStats.java +++ b/server/src/main/java/org/elasticsearch/index/flush/FlushStats.java @@ -21,7 +21,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContentFragment; @@ -29,7 +28,7 @@ import java.io.IOException; -public class FlushStats implements Streamable, Writeable, ToXContentFragment { +public class FlushStats implements Writeable, ToXContentFragment { private long total; private long periodic; @@ -116,11 +115,6 @@ static final class Fields { static final String TOTAL_TIME_IN_MILLIS = "total_time_in_millis"; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVLong(total); diff --git a/server/src/main/java/org/elasticsearch/index/get/GetResult.java b/server/src/main/java/org/elasticsearch/index/get/GetResult.java index ffaa42ce0ad21..f2fd252d7644e 100644 --- a/server/src/main/java/org/elasticsearch/index/get/GetResult.java +++ b/server/src/main/java/org/elasticsearch/index/get/GetResult.java @@ -27,7 +27,7 @@ import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; @@ -48,7 +48,7 @@ import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; -public class GetResult implements Streamable, Iterable, ToXContentObject { +public class GetResult implements Writeable, Iterable, ToXContentObject { public static final String _INDEX = "_index"; public static final String _TYPE = "_type"; @@ -72,7 +72,29 @@ public class GetResult implements Streamable, Iterable, ToXConten private BytesReference source; private byte[] sourceAsBytes; - GetResult() { + public GetResult(StreamInput in) throws IOException { + index = in.readString(); + type = in.readOptionalString(); + id = in.readString(); + seqNo = in.readZLong(); + primaryTerm = in.readVLong(); + version = in.readLong(); + exists = in.readBoolean(); + if (exists) { + source = in.readBytesReference(); + if (source.length() == 0) { + source = null; + } + if (in.getVersion().onOrAfter(Version.V_7_3_0)) { + documentFields = readFields(in); + metaFields = readFields(in); + } else { + Map fields = readFields(in); + documentFields = new HashMap<>(); + metaFields = 
new HashMap<>(); + splitFieldsByMetadata(fields, documentFields, metaFields); + } + } } public GetResult(String index, String type, String id, long seqNo, long primaryTerm, long version, boolean exists, @@ -376,12 +398,6 @@ public static GetResult fromXContent(XContentParser parser) throws IOException { return fromXContentEmbedded(parser); } - public static GetResult readGetResult(StreamInput in) throws IOException { - GetResult result = new GetResult(); - result.readFrom(in); - return result; - } - private Map readFields(StreamInput in) throws IOException { Map fields = null; int size = in.readVInt(); @@ -390,7 +406,7 @@ private Map readFields(StreamInput in) throws IOException } else { fields = new HashMap<>(size); for (int i = 0; i < size; i++) { - DocumentField field = DocumentField.readDocumentField(in); + DocumentField field = new DocumentField(in); fields.put(field.getName(), field); } } @@ -410,32 +426,6 @@ static void splitFieldsByMetadata(Map fields, Map fields = readFields(in); - documentFields = new HashMap<>(); - metaFields = new HashMap<>(); - splitFieldsByMetadata(fields, documentFields, metaFields); - } - } - } @Override public void writeTo(StreamOutput out) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/index/get/GetStats.java b/server/src/main/java/org/elasticsearch/index/get/GetStats.java index e13115ab8057b..72e93989b3ca7 100644 --- a/server/src/main/java/org/elasticsearch/index/get/GetStats.java +++ b/server/src/main/java/org/elasticsearch/index/get/GetStats.java @@ -21,7 +21,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContentFragment; @@ -29,7 +28,7 @@ import java.io.IOException; -public class GetStats implements Streamable, Writeable, ToXContentFragment { +public class GetStats implements Writeable, ToXContentFragment { private long existsCount; private long existsTimeInMillis; @@ -143,11 +142,6 @@ static final class Fields { static final String CURRENT = "current"; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVLong(existsCount); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index 044e65c7ec6fb..8de282787cc8a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -86,7 +86,7 @@ public Builder(RootObjectMapper.Builder builder, MapperService mapperService) { if (existingMetadataMapper == null) { final TypeParser parser = entry.getValue(); metadataMapper = parser.getDefault(mapperService.fullName(name), - mapperService.documentMapperParser().parserContext(builder.name())); + mapperService.documentMapperParser().parserContext()); } else { metadataMapper = existingMetadataMapper; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java index 8028960ab4262..4c071f946ccee 100644 --- 
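GetResult's new constructor preserves wire compatibility by branching on the peer's version: nodes on 7.3.0 or later send document fields and metadata fields as two separate maps, while older nodes send one combined map that the receiver splits. The pattern, condensed with the element types spelled out:

    if (in.getVersion().onOrAfter(Version.V_7_3_0)) {
        documentFields = readFields(in);
        metaFields = readFields(in);
    } else {
        Map<String, DocumentField> fields = readFields(in);
        documentFields = new HashMap<>();
        metaFields = new HashMap<>();
        splitFieldsByMetadata(fields, documentFields, metaFields);
    }

The corresponding writeTo has to take the mirror-image branch on out.getVersion() so that both sides agree on the layout.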
a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java @@ -64,8 +64,8 @@ public DocumentMapperParser(IndexSettings indexSettings, MapperService mapperSer this.rootTypeParsers = mapperRegistry.getMetadataMapperParsers(indexVersionCreated); } - public Mapper.TypeParser.ParserContext parserContext(String type) { - return new Mapper.TypeParser.ParserContext(type, similarityService::getSimilarity, mapperService, + public Mapper.TypeParser.ParserContext parserContext() { + return new Mapper.TypeParser.ParserContext(similarityService::getSimilarity, mapperService, typeParsers::get, indexVersionCreated, queryShardContextSupplier); } @@ -101,7 +101,7 @@ private DocumentMapper parse(String type, Map mapping, String de } - Mapper.TypeParser.ParserContext parserContext = parserContext(type); + Mapper.TypeParser.ParserContext parserContext = parserContext(); // parse RootObjectMapper DocumentMapper.Builder docBuilder = new DocumentMapper.Builder( (RootObjectMapper.Builder) rootObjectTypeParser.parse(type, mapping, parserContext), mapperService); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 07231759113e9..2a17f551126f6 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -670,7 +670,7 @@ private static Mapper.Builder createBuilderFromFieldType(final ParseContext } } if (builder == null) { - Mapper.TypeParser.ParserContext parserContext = context.docMapperParser().parserContext(currentFieldName); + Mapper.TypeParser.ParserContext parserContext = context.docMapperParser().parserContext(); Mapper.TypeParser typeParser = parserContext.typeParser(fieldType.typeName()); if (typeParser == null) { throw new MapperParsingException("Cannot generate dynamic mappings of type [" + fieldType.typeName() diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java index 1621f60b9b784..bc3a4b08e273e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java @@ -301,38 +301,23 @@ public void parse(ParseContext context) throws IOException { XContentParser.Token token = context.parser().currentToken(); if (token == XContentParser.Token.START_ARRAY) { token = context.parser().nextToken(); - if (token == XContentParser.Token.START_ARRAY) { - // its an array of array of lon/lat [ [1.2, 1.3], [1.4, 1.5] ] - while (token != XContentParser.Token.END_ARRAY) { - parseGeoPointIgnoringMalformed(context, sparse); - token = context.parser().nextToken(); + if (token == XContentParser.Token.VALUE_NUMBER) { + double lon = context.parser().doubleValue(); + context.parser().nextToken(); + double lat = context.parser().doubleValue(); + token = context.parser().nextToken(); + if (token == XContentParser.Token.VALUE_NUMBER) { + GeoPoint.assertZValue(ignoreZValue.value(), context.parser().doubleValue()); + } else if (token != XContentParser.Token.END_ARRAY) { + throw new ElasticsearchParseException("[{}] field type does not accept > 3 dimensions", CONTENT_TYPE); } + parse(context, sparse.reset(lat, lon)); } else { - // its an array of other possible values - if (token == 
XContentParser.Token.VALUE_NUMBER) { - double lon = context.parser().doubleValue(); - context.parser().nextToken(); - double lat = context.parser().doubleValue(); + while (token != XContentParser.Token.END_ARRAY) { + parseGeoPointIgnoringMalformed(context, sparse); token = context.parser().nextToken(); - if (token == XContentParser.Token.VALUE_NUMBER) { - GeoPoint.assertZValue(ignoreZValue.value(), context.parser().doubleValue()); - } else if (token != XContentParser.Token.END_ARRAY) { - throw new ElasticsearchParseException("[{}] field type does not accept > 3 dimensions", CONTENT_TYPE); - } - parse(context, sparse.reset(lat, lon)); - } else { - while (token != XContentParser.Token.END_ARRAY) { - if (token == XContentParser.Token.VALUE_STRING) { - parseGeoPointStringIgnoringMalformed(context, sparse); - } else { - parseGeoPointIgnoringMalformed(context, sparse); - } - token = context.parser().nextToken(); - } } } - } else if (token == XContentParser.Token.VALUE_STRING) { - parseGeoPointStringIgnoringMalformed(context, sparse); } else if (token == XContentParser.Token.VALUE_NULL) { if (fieldType.nullValue() != null) { parse(context, (GeoPoint) fieldType.nullValue()); @@ -353,21 +338,7 @@ public void parse(ParseContext context) throws IOException { */ private void parseGeoPointIgnoringMalformed(ParseContext context, GeoPoint sparse) throws IOException { try { - parse(context, GeoUtils.parseGeoPoint(context.parser(), sparse)); - } catch (ElasticsearchParseException e) { - if (ignoreMalformed.value() == false) { - throw e; - } - context.addIgnoredField(fieldType.name()); - } - } - - /** - * Parses geopoint represented as a string and ignores malformed geopoints if needed - */ - private void parseGeoPointStringIgnoringMalformed(ParseContext context, GeoPoint sparse) throws IOException { - try { - parse(context, sparse.resetFromString(context.parser().text(), ignoreZValue.value())); + parse(context, GeoUtils.parseGeoPoint(context.parser(), sparse, ignoreZValue.value())); } catch (ElasticsearchParseException e) { if (ignoreMalformed.value() == false) { throw e; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java index 6449c06fbe1ad..2ce1d5328f3b9 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java @@ -24,8 +24,9 @@ import org.apache.lucene.geo.Polygon; import org.apache.lucene.index.IndexableField; import org.elasticsearch.common.Explicit; +import org.elasticsearch.common.geo.GeometryIndexer; +import org.elasticsearch.common.geo.GeometryParser; import org.elasticsearch.common.geo.builders.ShapeBuilder; -import org.elasticsearch.common.geo.parsers.ShapeParser; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.geo.geometry.Circle; import org.elasticsearch.geo.geometry.Geometry; @@ -91,12 +92,17 @@ public GeoShapeFieldType clone() { } } + private final GeometryParser geometryParser; + private final GeometryIndexer geometryIndexer; + public GeoShapeFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Explicit ignoreMalformed, Explicit coerce, Explicit ignoreZValue, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) { super(simpleName, fieldType, defaultFieldType, ignoreMalformed, coerce, ignoreZValue, indexSettings, multiFields, copyTo); + geometryParser = new GeometryParser(orientation() 
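The geo_point change above inverts the old branch order: an array is now disambiguated by its first element. A leading number means a single coordinate in GeoJSON order, [lon, lat] with an optional third z value; anything else falls through to the generic per-element loop, where GeoUtils.parseGeoPoint now also handles string forms, which is why the dedicated string-parsing helper could be deleted. Illustrative inputs for the two branches (values are made up):

    // first token is a number -> one point, lon/lat(, z) order
    //   [13.4, 52.5]   or   [13.4, 52.5, 34.0]
    // first token is not a number -> iterate elements as individual points
    //   [[13.4, 52.5], [2.3, 48.9]]
    //   ["52.5,13.4", "u33db"]   // "lat,lon" string and geohash forms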
== ShapeBuilder.Orientation.RIGHT, coerce().value(), ignoreZValue.value()); + geometryIndexer = new GeometryIndexer(true); } @Override @@ -108,13 +114,14 @@ public GeoShapeFieldType fieldType() { @Override public void parse(ParseContext context) throws IOException { try { + Object shape = context.parseExternalValue(Object.class); if (shape == null) { - ShapeBuilder shapeBuilder = ShapeParser.parse(context.parser(), this); - if (shapeBuilder == null) { + Geometry geometry = geometryParser.parse(context.parser()); + if (geometry == null) { return; } - shape = shapeBuilder.buildGeometry(); + shape = geometryIndexer.prepareForIndexing(geometry); } indexShape(context, shape); } catch (Exception e) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java index 411045abaf796..82a0239777e30 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java @@ -34,7 +34,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.search.TermQuery; -import org.apache.lucene.search.intervals.IntervalsSource; +import org.apache.lucene.queries.intervals.IntervalsSource; import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper; import org.apache.lucene.search.spans.SpanQuery; import org.apache.lucene.util.BytesRef; @@ -99,13 +99,15 @@ public MappedFieldType() { @Override public abstract MappedFieldType clone(); - /** Return a fielddata builder for this field - * @throws IllegalArgumentException if the fielddata is not supported on this type. - * An IllegalArgumentException is needed in order to return an http error 400 - * when this error occurs in a request. see: {@link org.elasticsearch.ExceptionsHelper#status} + /** + * Return a fielddata builder for this field * * @param fullyQualifiedIndexName the name of the index this field-data is build for - * */ + * + * @throws IllegalArgumentException if the fielddata is not supported on this type. + * An IllegalArgumentException is needed in order to return an http error 400 + * when this error occurs in a request. 
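geo_shape parsing now goes through the new geometry abstraction in two explicit steps, parse then prepare: GeometryParser produces a Geometry from the incoming XContent, and GeometryIndexer normalizes it (for example, splitting shapes that cross the dateline) before it is handed to Lucene. The flow, condensed from the hunks above, with both collaborators built once per mapper as shown in the constructor:

    Geometry geometry = geometryParser.parse(context.parser());  // may return null for a null value
    if (geometry != null) {
        indexShape(context, geometryIndexer.prepareForIndexing(geometry));
    }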
see: {@link org.elasticsearch.ExceptionsHelper#status} + */ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName) { throw new IllegalArgumentException("Fielddata is not supported on field [" + name() + "] of type [" + typeName() + "]"); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java index 5de5394a94abe..e9ccd69b20cc5 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java @@ -78,8 +78,6 @@ public interface TypeParser { class ParserContext { - private final String type; - private final Function similarityLookupService; private final MapperService mapperService; @@ -90,10 +88,9 @@ class ParserContext { private final Supplier queryShardContextSupplier; - public ParserContext(String type, Function similarityLookupService, + public ParserContext(Function similarityLookupService, MapperService mapperService, Function typeParsers, Version indexVersionCreated, Supplier queryShardContextSupplier) { - this.type = type; this.similarityLookupService = similarityLookupService; this.mapperService = mapperService; this.typeParsers = typeParsers; @@ -101,10 +98,6 @@ public ParserContext(String type, Function similarit this.queryShardContextSupplier = queryShardContextSupplier; } - public String type() { - return type; - } - public IndexAnalyzers getIndexAnalyzers() { return mapperService.getIndexAnalyzers(); } @@ -141,7 +134,7 @@ public ParserContext createMultiFieldContext(ParserContext in) { static class MultiFieldParserContext extends ParserContext { MultiFieldParserContext(ParserContext in) { - super(in.type(), in.similarityLookupService(), in.mapperService(), in.typeParsers(), + super(in.similarityLookupService(), in.mapperService(), in.typeParsers(), in.indexVersionCreated(), in.queryShardContextSupplier()); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 6002348050700..90cbf40f924e8 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -777,7 +777,7 @@ public MappedFieldType unmappedFieldType(String type) { } MappedFieldType fieldType = unmappedFieldTypes.get(type); if (fieldType == null) { - final Mapper.TypeParser.ParserContext parserContext = documentMapperParser().parserContext(type); + final Mapper.TypeParser.ParserContext parserContext = documentMapperParser().parserContext(); Mapper.TypeParser typeParser = parserContext.typeParser(type); if (typeParser == null) { throw new IllegalArgumentException("No mapper found for type [" + type + "]"); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java index 89b1810bf393c..350cad6efa2c6 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java @@ -247,7 +247,7 @@ public Mapper.Builder findTemplateBuilder(ParseContext context, String name, Str if (dynamicTemplate == null) { return null; } - Mapper.TypeParser.ParserContext parserContext = context.docMapperParser().parserContext(name); + Mapper.TypeParser.ParserContext parserContext = context.docMapperParser().parserContext(); String mappingType = 
dynamicTemplate.mappingType(dynamicType); Mapper.TypeParser typeParser = parserContext.typeParser(mappingType); if (typeParser == null) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java index d1ae07a7f0b9d..df0177a41a57c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java @@ -26,12 +26,15 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter; import org.apache.lucene.analysis.shingle.FixedShingleFilter; +import org.apache.lucene.analysis.tokenattributes.BytesTermAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; +import org.apache.lucene.queries.intervals.Intervals; +import org.apache.lucene.queries.intervals.IntervalsSource; import org.apache.lucene.search.AutomatonQuery; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; @@ -44,8 +47,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.SynonymQuery; import org.apache.lucene.search.TermQuery; -import org.apache.lucene.search.intervals.Intervals; -import org.apache.lucene.search.intervals.IntervalsSource; import org.apache.lucene.search.spans.FieldMaskingSpanQuery; import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper; import org.apache.lucene.search.spans.SpanNearQuery; @@ -421,13 +422,13 @@ public Query prefixQuery(String value, MultiTermQuery.RewriteMethod method, Quer public IntervalsSource intervals(BytesRef term) { if (term.length > maxChars) { - return Intervals.prefix(term.utf8ToString()); + return Intervals.prefix(term); } if (term.length >= minChars) { return Intervals.fixField(name(), Intervals.term(term)); } String wildcardTerm = term.utf8ToString() + "?".repeat(Math.max(0, minChars - term.length)); - return Intervals.or(Intervals.fixField(name(), Intervals.wildcard(wildcardTerm)), Intervals.term(term)); + return Intervals.or(Intervals.fixField(name(), Intervals.wildcard(new BytesRef(wildcardTerm))), Intervals.term(term)); } @Override @@ -671,7 +672,7 @@ public IntervalsSource intervals(String text, int maxGaps, boolean ordered, if (prefixFieldType != null) { return prefixFieldType.intervals(normalizedTerm); } - return Intervals.prefix(normalizedTerm.utf8ToString()); // TODO make Intervals.prefix() take a BytesRef + return Intervals.prefix(normalizedTerm); } IntervalBuilder builder = new IntervalBuilder(name(), analyzer == null ? 
searchAnalyzer() : analyzer); return builder.analyzeText(text, maxGaps, ordered); @@ -680,7 +681,10 @@ public IntervalsSource intervals(String text, int maxGaps, boolean ordered, @Override public Query phraseQuery(TokenStream stream, int slop, boolean enablePosIncrements) throws IOException { String field = name(); - if (indexPhrases && slop == 0 && hasGaps(stream) == false) { + // we can't use the index_phrases shortcut with slop, if there are gaps in the stream, + // or if the incoming token stream is the output of a token graph due to + // https://issues.apache.org/jira/browse/LUCENE-8916 + if (indexPhrases && slop == 0 && hasGaps(stream) == false && stream.hasAttribute(BytesTermAttribute.class) == false) { stream = new FixedShingleFilter(stream, 2); field = field + FAST_PHRASE_SUFFIX; } @@ -693,6 +697,9 @@ public Query phraseQuery(TokenStream stream, int slop, boolean enablePosIncremen stream.reset(); while (stream.incrementToken()) { + if (termAtt.getBytesRef() == null) { + throw new IllegalStateException("Null term while building phrase query"); + } if (enablePosIncrements) { position += posIncrAtt.getPositionIncrement(); } diff --git a/server/src/main/java/org/elasticsearch/index/merge/MergeStats.java b/server/src/main/java/org/elasticsearch/index/merge/MergeStats.java index d94c3f71900d2..ccb2013e6fd8c 100644 --- a/server/src/main/java/org/elasticsearch/index/merge/MergeStats.java +++ b/server/src/main/java/org/elasticsearch/index/merge/MergeStats.java @@ -21,7 +21,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ -30,7 +29,7 @@ import java.io.IOException; -public class MergeStats implements Streamable, Writeable, ToXContentFragment { +public class MergeStats implements Writeable, ToXContentFragment { private long total; private long totalTimeInMillis; @@ -238,11 +237,6 @@ static final class Fields { static final String TOTAL_THROTTLE_BYTES_PER_SEC = "total_auto_throttle"; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVLong(total); diff --git a/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java b/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java index b4631787af0f7..24850b22cc36f 100644 --- a/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java @@ -335,7 +335,7 @@ public InnerHitBuilder setDocValueFields(List docValueFields) { * Adds a field to load from the docvalue and return. 
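Two upstream Lucene adjustments run through the text-field and interval hunks: the intervals API moved from org.apache.lucene.search.intervals to org.apache.lucene.queries.intervals, and Intervals.prefix()/Intervals.wildcard() now accept a BytesRef directly, which removes the utf8ToString() round-trips and the related TODOs. Sketch, using identifiers from the hunks above:

    import org.apache.lucene.queries.intervals.Intervals;
    import org.apache.lucene.queries.intervals.IntervalsSource;
    import org.apache.lucene.util.BytesRef;

    BytesRef normalized = analyzer.normalize(field, pattern);
    IntervalsSource prefix = Intervals.prefix(normalized);      // was: Intervals.prefix(normalized.utf8ToString())
    IntervalsSource wildcard = Intervals.wildcard(normalized);  // was: Intervals.wildcard(normalized.utf8ToString())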
*/ public InnerHitBuilder addDocValueField(String field, String format) { - if (docValueFields == null) { + if (docValueFields == null || docValueFields.isEmpty() == true) { docValueFields = new ArrayList<>(); } docValueFields.add(new FieldAndFormat(field, format)); diff --git a/server/src/main/java/org/elasticsearch/index/query/IntervalBuilder.java b/server/src/main/java/org/elasticsearch/index/query/IntervalBuilder.java index 5e1047684840f..a1970759460a3 100644 --- a/server/src/main/java/org/elasticsearch/index/query/IntervalBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/IntervalBuilder.java @@ -29,9 +29,9 @@ import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.MatchesIterator; import org.apache.lucene.search.QueryVisitor; -import org.apache.lucene.search.intervals.IntervalIterator; -import org.apache.lucene.search.intervals.Intervals; -import org.apache.lucene.search.intervals.IntervalsSource; +import org.apache.lucene.queries.intervals.IntervalIterator; +import org.apache.lucene.queries.intervals.Intervals; +import org.apache.lucene.queries.intervals.IntervalsSource; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.graph.GraphTokenStreamFiniteStrings; diff --git a/server/src/main/java/org/elasticsearch/index/query/IntervalFilterScript.java b/server/src/main/java/org/elasticsearch/index/query/IntervalFilterScript.java index 306560b3d5fb6..1f86179dca73c 100644 --- a/server/src/main/java/org/elasticsearch/index/query/IntervalFilterScript.java +++ b/server/src/main/java/org/elasticsearch/index/query/IntervalFilterScript.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.query; -import org.apache.lucene.search.intervals.IntervalIterator; +import org.apache.lucene.queries.intervals.IntervalIterator; import org.elasticsearch.script.ScriptContext; /** diff --git a/server/src/main/java/org/elasticsearch/index/query/IntervalQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/IntervalQueryBuilder.java index b9ea7d96e4648..4813adfc4d3db 100644 --- a/server/src/main/java/org/elasticsearch/index/query/IntervalQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/IntervalQueryBuilder.java @@ -21,7 +21,7 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; -import org.apache.lucene.search.intervals.IntervalQuery; +import org.apache.lucene.queries.intervals.IntervalQuery; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java b/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java index d4d28057c12d0..b81206c7f87e5 100644 --- a/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java +++ b/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java @@ -20,10 +20,10 @@ package org.elasticsearch.index.query; import org.apache.lucene.index.IndexOptions; -import org.apache.lucene.search.intervals.FilteredIntervalsSource; -import org.apache.lucene.search.intervals.IntervalIterator; -import org.apache.lucene.search.intervals.Intervals; -import org.apache.lucene.search.intervals.IntervalsSource; +import org.apache.lucene.queries.intervals.FilteredIntervalsSource; +import org.apache.lucene.queries.intervals.IntervalIterator; +import org.apache.lucene.queries.intervals.Intervals; +import 
org.apache.lucene.queries.intervals.IntervalsSource; import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; @@ -585,12 +585,12 @@ public IntervalsSource getSource(QueryShardContext context, MappedFieldType fiel } BytesRef normalizedTerm = analyzer.normalize(useField, pattern); // TODO Intervals.wildcard() should take BytesRef - source = Intervals.fixField(useField, Intervals.wildcard(normalizedTerm.utf8ToString())); + source = Intervals.fixField(useField, Intervals.wildcard(normalizedTerm)); } else { checkPositions(fieldType); BytesRef normalizedTerm = analyzer.normalize(fieldType.name(), pattern); - source = Intervals.wildcard(normalizedTerm.utf8ToString()); + source = Intervals.wildcard(normalizedTerm); } return source; } diff --git a/server/src/main/java/org/elasticsearch/index/recovery/RecoveryStats.java b/server/src/main/java/org/elasticsearch/index/recovery/RecoveryStats.java index 7fdc767fb9429..c2ad6297d63bc 100644 --- a/server/src/main/java/org/elasticsearch/index/recovery/RecoveryStats.java +++ b/server/src/main/java/org/elasticsearch/index/recovery/RecoveryStats.java @@ -20,7 +20,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContentFragment; @@ -34,7 +33,7 @@ * Recovery related statistics, starting at the shard level and allowing aggregation to * indices and node level */ -public class RecoveryStats implements ToXContentFragment, Writeable, Streamable { +public class RecoveryStats implements ToXContentFragment, Writeable { private final AtomicInteger currentAsSource = new AtomicInteger(); private final AtomicInteger currentAsTarget = new AtomicInteger(); @@ -122,11 +121,6 @@ static final class Fields { static final String THROTTLE_TIME_IN_MILLIS = "throttle_time_in_millis"; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(currentAsSource.get()); diff --git a/server/src/main/java/org/elasticsearch/index/refresh/RefreshStats.java b/server/src/main/java/org/elasticsearch/index/refresh/RefreshStats.java index 6488de5df0aca..c0e365df774db 100644 --- a/server/src/main/java/org/elasticsearch/index/refresh/RefreshStats.java +++ b/server/src/main/java/org/elasticsearch/index/refresh/RefreshStats.java @@ -22,7 +22,6 @@ import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContentFragment; @@ -31,7 +30,7 @@ import java.io.IOException; import java.util.Objects; -public class RefreshStats implements Streamable, Writeable, ToXContentFragment { +public class RefreshStats implements Writeable, ToXContentFragment { private long total; @@ -151,11 +150,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of 
Streamable is to be replaced by Writeable"); - } - @Override public boolean equals(Object obj) { if (obj == null || obj.getClass() != RefreshStats.class) { diff --git a/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java b/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java index 0f27fb4753e9b..23f87587296f2 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java @@ -109,10 +109,18 @@ public abstract class AbstractBulkByScrollRequest bulkFailures, @@ -193,16 +199,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(timedOut); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - took = in.readTimeValue(); - status = new BulkByScrollTask.Status(in); - bulkFailures = in.readList(Failure::new); - searchFailures = in.readList(ScrollableHitSource.SearchFailure::new); - timedOut = in.readBoolean(); - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field(TOOK_FIELD, took.millis()); diff --git a/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryAction.java b/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryAction.java index 092f93ed2e54e..9c7f0d1a1a473 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryAction.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryAction.java @@ -19,19 +19,14 @@ package org.elasticsearch.index.reindex; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class DeleteByQueryAction extends StreamableResponseActionType { +public class DeleteByQueryAction extends ActionType { public static final DeleteByQueryAction INSTANCE = new DeleteByQueryAction(); public static final String NAME = "indices:data/write/delete/byquery"; private DeleteByQueryAction() { - super(NAME); - } - - @Override - public BulkByScrollResponse newResponse() { - return new BulkByScrollResponse(); + super(NAME, BulkByScrollResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java b/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java index c1e2f011a99de..a8d48010f25e2 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java @@ -66,7 +66,7 @@ public DeleteByQueryRequest(String... 
indices) { } public DeleteByQueryRequest(StreamInput in) throws IOException { - super.readFrom(in); + super(in); } private DeleteByQueryRequest(SearchRequest search, boolean setDefaults) { diff --git a/server/src/main/java/org/elasticsearch/index/reindex/ReindexAction.java b/server/src/main/java/org/elasticsearch/index/reindex/ReindexAction.java index 9ad9baa5abb90..fcac9ed435237 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/ReindexAction.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/ReindexAction.java @@ -19,18 +19,13 @@ package org.elasticsearch.index.reindex; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class ReindexAction extends StreamableResponseActionType { +public class ReindexAction extends ActionType { public static final ReindexAction INSTANCE = new ReindexAction(); public static final String NAME = "indices:data/write/reindex"; private ReindexAction() { - super(NAME); - } - - @Override - public BulkByScrollResponse newResponse() { - return new BulkByScrollResponse(); + super(NAME, BulkByScrollResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java b/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java index bce848998159c..85c64a379d181 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java @@ -86,7 +86,7 @@ private ReindexRequest(SearchRequest search, IndexRequest destination, boolean s } public ReindexRequest(StreamInput in) throws IOException { - super.readFrom(in); + super(in); destination = new IndexRequest(in); remoteInfo = in.readOptionalWriteable(RemoteInfo::new); } @@ -265,11 +265,6 @@ public ReindexRequest forSlice(TaskId slicingTask, SearchRequest slice, int tota return sliced; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryAction.java b/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryAction.java index 3c0523d34ee56..4c296a3493ed8 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryAction.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryAction.java @@ -19,18 +19,13 @@ package org.elasticsearch.index.reindex; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class UpdateByQueryAction extends StreamableResponseActionType { +public class UpdateByQueryAction extends ActionType { public static final UpdateByQueryAction INSTANCE = new UpdateByQueryAction(); public static final String NAME = "indices:data/write/update/byquery"; private UpdateByQueryAction() { - super(NAME); - } - - @Override - public BulkByScrollResponse newResponse() { - return new BulkByScrollResponse(); + super(NAME, BulkByScrollResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java b/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java index 138af524bf9c9..9d48acce3d437 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java +++ 
b/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java @@ -57,7 +57,7 @@ public UpdateByQueryRequest(String... indices) { } public UpdateByQueryRequest(StreamInput in) throws IOException { - super.readFrom(in); + super(in); pipeline = in.readOptionalString(); } @@ -171,11 +171,6 @@ public IndicesOptions indicesOptions() { return getSearchRequest().indicesOptions(); } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java index a7f29d14b49b8..cd79aa9d60e2e 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java @@ -21,7 +21,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.StreamableResponseActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.replication.ReplicationRequest; import org.elasticsearch.action.support.replication.ReplicationResponse; @@ -52,12 +51,7 @@ public class GlobalCheckpointSyncAction extends TransportReplicationAction< ReplicationResponse> { public static String ACTION_NAME = "indices:admin/seq_no/global_checkpoint_sync"; - public static ActionType TYPE = new StreamableResponseActionType<>(ACTION_NAME) { - @Override - public ReplicationResponse newResponse() { - return new ReplicationResponse(); - } - }; + public static ActionType TYPE = new ActionType<>(ACTION_NAME, ReplicationResponse::new); @Inject public GlobalCheckpointSyncAction( @@ -85,8 +79,8 @@ public GlobalCheckpointSyncAction( } @Override - protected ReplicationResponse newResponseInstance() { - return new ReplicationResponse(); + protected ReplicationResponse newResponseInstance(StreamInput in) throws IOException { + return new ReplicationResponse(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java index f2e44893a015b..c588a02d85f7b 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java @@ -22,7 +22,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.single.shard.SingleShardRequest; import org.elasticsearch.action.support.single.shard.TransportSingleShardAction; @@ -123,13 +123,13 @@ protected boolean resolveIndex(final T request) { } - public static class Add extends StreamableResponseActionType { + public static class Add extends ActionType { public static final Add INSTANCE = new Add(); public static final String ACTION_NAME = "indices:admin/seq_no/add_retention_lease"; private Add() { - super(ACTION_NAME); + super(ACTION_NAME, Response::new); } public static class 
TransportAction extends TransportRetentionLeaseAction { @@ -168,21 +168,15 @@ protected Writeable.Reader getResponseReader() { } } - - @Override - public Response newResponse() { - return new Response(); - } - } - public static class Renew extends StreamableResponseActionType { + public static class Renew extends ActionType { public static final Renew INSTANCE = new Renew(); public static final String ACTION_NAME = "indices:admin/seq_no/renew_retention_lease"; private Renew() { - super(ACTION_NAME); + super(ACTION_NAME, Response::new); } public static class TransportAction extends TransportRetentionLeaseAction { @@ -214,21 +208,15 @@ void doRetentionLeaseAction(final IndexShard indexShard, final RenewRequest requ } } - - @Override - public Response newResponse() { - return new Response(); - } - } - public static class Remove extends StreamableResponseActionType { + public static class Remove extends ActionType { public static final Remove INSTANCE = new Remove(); public static final String ACTION_NAME = "indices:admin/seq_no/remove_retention_lease"; private Remove() { - super(ACTION_NAME); + super(ACTION_NAME, Response::new); } public static class TransportAction extends TransportRetentionLeaseAction { @@ -261,12 +249,6 @@ void doRetentionLeaseAction(final IndexShard indexShard, final RemoveRequest req } } - - @Override - public Response newResponse() { - return new Response(); - } - } private abstract static class Request> extends SingleShardRequest { @@ -385,8 +367,7 @@ public RemoveRequest(final ShardId shardId, final String id) { public static class Response extends ActionResponse { - public Response() { - } + public Response() {} Response(final StreamInput in) throws IOException { super(in); diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java index c9fe1d31bd1d5..d93500a5c6a72 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java @@ -23,7 +23,6 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.StreamableResponseActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.replication.ReplicationRequest; @@ -58,12 +57,7 @@ public class RetentionLeaseBackgroundSyncAction extends TransportReplicationActi ReplicationResponse> { public static String ACTION_NAME = "indices:admin/seq_no/retention_lease_background_sync"; - public static ActionType TYPE = new StreamableResponseActionType<>(ACTION_NAME) { - @Override - public ReplicationResponse newResponse() { - return new ReplicationResponse(); - } - }; + public static ActionType TYPE = new ActionType<>(ACTION_NAME, ReplicationResponse::new); private static final Logger LOGGER = LogManager.getLogger(RetentionLeaseSyncAction.class); @@ -137,11 +131,6 @@ public Request(final ShardId shardId, final RetentionLeases retentionLeases) { waitForActiveShards(ActiveShardCount.NONE); } - @Override - public void readFrom(final StreamInput in) { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(final StreamOutput out) throws IOException { 
super.writeTo(Objects.requireNonNull(out)); @@ -162,8 +151,8 @@ public String toString() { } @Override - protected ReplicationResponse newResponseInstance() { - return new ReplicationResponse(); + protected ReplicationResponse newResponseInstance(StreamInput in) throws IOException { + return new ReplicationResponse(in); } } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java index 052e72185c501..69de0ed64f9ce 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java @@ -23,7 +23,6 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.StreamableResponseActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.WriteResponse; @@ -56,12 +55,7 @@ public class RetentionLeaseSyncAction extends TransportWriteAction { public static String ACTION_NAME = "indices:admin/seq_no/retention_lease_sync"; - public static ActionType TYPE = new StreamableResponseActionType<>(ACTION_NAME) { - @Override - public Response newResponse() { - return new Response(); - } - }; + public static ActionType TYPE = new ActionType<>(ACTION_NAME, Response::new); private static final Logger LOGGER = LogManager.getLogger(RetentionLeaseSyncAction.class); @@ -141,11 +135,6 @@ public Request(final ShardId shardId, final RetentionLeases retentionLeases) { waitForActiveShards(ActiveShardCount.NONE); } - @Override - public void readFrom(final StreamInput in) { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(final StreamOutput out) throws IOException { super.writeTo(Objects.requireNonNull(out)); @@ -167,6 +156,12 @@ public String toString() { public static final class Response extends ReplicationResponse implements WriteResponse { + public Response() {} + + Response(StreamInput in) throws IOException { + super(in); + } + @Override public void setForcedRefresh(final boolean forcedRefresh) { // ignore @@ -175,8 +170,8 @@ public void setForcedRefresh(final boolean forcedRefresh) { } @Override - protected Response newResponseInstance() { - return new Response(); + protected Response newResponseInstance(StreamInput in) throws IOException { + return new Response(in); } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/DocsStats.java b/server/src/main/java/org/elasticsearch/index/shard/DocsStats.java index 163738e5c9b3b..f757f2ae15aec 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/DocsStats.java +++ b/server/src/main/java/org/elasticsearch/index/shard/DocsStats.java @@ -21,7 +21,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -29,7 +28,7 @@ import java.io.IOException; -public class DocsStats implements Streamable, Writeable, ToXContentFragment { +public class DocsStats implements Writeable, ToXContentFragment { private long count = 0; private long deleted = 0; @@ -88,11 
+87,6 @@ public long getAverageSizeInBytes() { return totalDocs == 0 ? 0 : totalSizeInBytes / totalDocs; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVLong(count); diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexingStats.java b/server/src/main/java/org/elasticsearch/index/shard/IndexingStats.java index 1bc51926fcbff..17ac5a90e31a3 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexingStats.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexingStats.java @@ -22,7 +22,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContent; @@ -33,9 +32,9 @@ import java.util.HashMap; import java.util.Map; -public class IndexingStats implements Streamable, Writeable, ToXContentFragment { +public class IndexingStats implements Writeable, ToXContentFragment { - public static class Stats implements Streamable, Writeable, ToXContentFragment { + public static class Stats implements Writeable, ToXContentFragment { private long indexCount; private long indexTimeInMillis; @@ -147,11 +146,6 @@ public long getNoopUpdateCount() { return noopUpdateCount; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVLong(indexCount); @@ -282,11 +276,6 @@ static final class Fields { static final String THROTTLED_TIME = "throttle_time"; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { totalStats.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java b/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java index 0fc4e20df6bfa..cfd0d99e74c75 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java +++ b/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java @@ -472,8 +472,7 @@ private void printRerouteCommand(ShardPath shardPath, Terminal terminal, boolean : new AllocateEmptyPrimaryAllocationCommand(index, id, nodeId, false)); terminal.println(""); - terminal.println("POST /_cluster/reroute'\n" - + Strings.toString(commands, true, true) + "'"); + terminal.println("POST /_cluster/reroute\n" + Strings.toString(commands, true, true)); terminal.println(""); terminal.println("You must accept the possibility of data loss by changing parameter `accept_data_loss` to `true`."); terminal.println(""); diff --git a/server/src/main/java/org/elasticsearch/index/store/StoreStats.java b/server/src/main/java/org/elasticsearch/index/store/StoreStats.java index 3a9443636ea7e..c9da135058556 100644 --- a/server/src/main/java/org/elasticsearch/index/store/StoreStats.java +++ 
b/server/src/main/java/org/elasticsearch/index/store/StoreStats.java @@ -21,7 +21,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.ToXContentFragment; @@ -29,7 +28,7 @@ import java.io.IOException; -public class StoreStats implements Streamable, Writeable, ToXContentFragment { +public class StoreStats implements Writeable, ToXContentFragment { private long sizeInBytes; @@ -69,11 +68,6 @@ public ByteSizeValue getSize() { return size(); } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVLong(sizeInBytes); diff --git a/server/src/main/java/org/elasticsearch/index/translog/Checkpoint.java b/server/src/main/java/org/elasticsearch/index/translog/Checkpoint.java index 364203f898cc1..fe20a52f482f3 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Checkpoint.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Checkpoint.java @@ -20,6 +20,9 @@ package org.elasticsearch.index.translog; import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.index.CorruptIndexException; +import org.apache.lucene.index.IndexFormatTooNewException; +import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.store.DataInput; import org.apache.lucene.store.DataOutput; import org.apache.lucene.store.Directory; @@ -33,6 +36,7 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.nio.channels.FileChannel; +import java.nio.file.NoSuchFileException; import java.nio.file.OpenOption; import java.nio.file.Path; @@ -47,41 +51,19 @@ final class Checkpoint { final long minTranslogGeneration; final long trimmedAboveSeqNo; - private static final int INITIAL_VERSION = 1; // start with 1, just to recognize there was some magic serialization logic before - private static final int VERSION_6_0_0 = 2; // introduction of global checkpoints - private static final int CURRENT_VERSION = 3; // introduction of trimmed above seq# + private static final int CURRENT_VERSION = 3; private static final String CHECKPOINT_CODEC = "ckp"; - // size of 6.4.0 checkpoint - static final int V3_FILE_SIZE = CodecUtil.headerLength(CHECKPOINT_CODEC) + Integer.BYTES // ops + Long.BYTES // offset + Long.BYTES // generation - + Long.BYTES // minimum sequence number, introduced in 6.0.0 - + Long.BYTES // maximum sequence number, introduced in 6.0.0 - + Long.BYTES // global checkpoint, introduced in 6.0.0 - + Long.BYTES // minimum translog generation in the translog - introduced in 6.0.0 - + Long.BYTES // maximum reachable (trimmed) sequence number, introduced in 6.4.0 - + CodecUtil.footerLength(); - - // size of 6.0.0 checkpoint - static final int V2_FILE_SIZE = CodecUtil.headerLength(CHECKPOINT_CODEC) - + Integer.BYTES // ops - + Long.BYTES // offset - + Long.BYTES // generation - + Long.BYTES // minimum sequence number, introduced in 6.0.0 - + Long.BYTES // maximum sequence number, introduced in 6.0.0 - + Long.BYTES // global checkpoint, introduced in 6.0.0 - + Long.BYTES // minimum translog generation in the translog - introduced in 6.0.0 - + CodecUtil.footerLength(); - - // size of 5.0.0 checkpoint - static 
final int V1_FILE_SIZE = CodecUtil.headerLength(CHECKPOINT_CODEC) - + Integer.BYTES // ops - + Long.BYTES // offset - + Long.BYTES // generation + + Long.BYTES // minimum sequence number + + Long.BYTES // maximum sequence number + + Long.BYTES // global checkpoint + + Long.BYTES // minimum translog generation in the translog + + Long.BYTES // maximum reachable (trimmed) sequence number + CodecUtil.footerLength(); /** @@ -132,7 +114,7 @@ static Checkpoint emptyTranslogCheckpoint(final long offset, final long generati return new Checkpoint(offset, 0, generation, minSeqNo, maxSeqNo, globalCheckpoint, minTranslogGeneration, trimmedAboveSeqNo); } - static Checkpoint readCheckpointV6_4_0(final DataInput in) throws IOException { + static Checkpoint readCheckpointV3(final DataInput in) throws IOException { final long offset = in.readLong(); final int numOps = in.readInt(); final long generation = in.readLong(); @@ -144,31 +126,6 @@ static Checkpoint readCheckpointV6_4_0(final DataInput in) throws IOException { return new Checkpoint(offset, numOps, generation, minSeqNo, maxSeqNo, globalCheckpoint, minTranslogGeneration, trimmedAboveSeqNo); } - static Checkpoint readCheckpointV6_0_0(final DataInput in) throws IOException { - final long offset = in.readLong(); - final int numOps = in.readInt(); - final long generation = in.readLong(); - final long minSeqNo = in.readLong(); - final long maxSeqNo = in.readLong(); - final long globalCheckpoint = in.readLong(); - final long minTranslogGeneration = in.readLong(); - final long trimmedAboveSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; - return new Checkpoint(offset, numOps, generation, minSeqNo, maxSeqNo, globalCheckpoint, minTranslogGeneration, trimmedAboveSeqNo); - } - - // reads a checksummed checkpoint introduced in ES 5.0.0 - static Checkpoint readCheckpointV5_0_0(final DataInput in) throws IOException { - final long offset = in.readLong(); - final int numOps = in.readInt(); - final long generation = in.readLong(); - final long minSeqNo = SequenceNumbers.NO_OPS_PERFORMED; - final long maxSeqNo = SequenceNumbers.NO_OPS_PERFORMED; - final long globalCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; - final long minTranslogGeneration = -1; - final long trimmedAboveSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; - return new Checkpoint(offset, numOps, generation, minSeqNo, maxSeqNo, globalCheckpoint, minTranslogGeneration, trimmedAboveSeqNo); - } - @Override public String toString() { return "Checkpoint{" + @@ -188,18 +145,12 @@ public static Checkpoint read(Path path) throws IOException { try (IndexInput indexInput = dir.openInput(path.getFileName().toString(), IOContext.DEFAULT)) { // We checksum the entire file before we even go and parse it. If it's corrupted we barf right here. 
CodecUtil.checksumEntireFile(indexInput); - final int fileVersion = CodecUtil.checkHeader(indexInput, CHECKPOINT_CODEC, INITIAL_VERSION, CURRENT_VERSION); - if (fileVersion == INITIAL_VERSION) { - assert indexInput.length() == V1_FILE_SIZE : indexInput.length(); - return Checkpoint.readCheckpointV5_0_0(indexInput); - } else if (fileVersion == VERSION_6_0_0) { - assert indexInput.length() == V2_FILE_SIZE : indexInput.length(); - return Checkpoint.readCheckpointV6_0_0(indexInput); - } else { - assert fileVersion == CURRENT_VERSION : fileVersion; - assert indexInput.length() == V3_FILE_SIZE : indexInput.length(); - return Checkpoint.readCheckpointV6_4_0(indexInput); - } + final int fileVersion = CodecUtil.checkHeader(indexInput, CHECKPOINT_CODEC, CURRENT_VERSION, CURRENT_VERSION); + assert fileVersion == CURRENT_VERSION : fileVersion; + assert indexInput.length() == V3_FILE_SIZE : indexInput.length(); + return Checkpoint.readCheckpointV3(indexInput); + } catch (CorruptIndexException | NoSuchFileException | IndexFormatTooOldException | IndexFormatTooNewException e) { + throw new TranslogCorruptedException(path.toString(), e); } } } diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java index 82ad2046c510b..b98f0f2b6438d 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -219,28 +219,28 @@ public Translog( private ArrayList recoverFromFiles(Checkpoint checkpoint) throws IOException { boolean success = false; ArrayList foundTranslogs = new ArrayList<>(); - try (ReleasableLock lock = writeLock.acquire()) { + try (ReleasableLock ignored = writeLock.acquire()) { logger.debug("open uncommitted translog checkpoint {}", checkpoint); final long minGenerationToRecoverFrom = checkpoint.minTranslogGeneration; assert minGenerationToRecoverFrom >= 0 : "minTranslogGeneration should be non-negative"; - final String checkpointTranslogFile = getFilename(checkpoint.generation); - // we open files in reverse order in order to validate tranlsog uuid before we start traversing the translog based on + // we open files in reverse order in order to validate the translog uuid before we start traversing the translog based on // the generation id we found in the lucene commit. This gives for better error messages if the wrong // translog was found. - foundTranslogs.add(openReader(location.resolve(checkpointTranslogFile), checkpoint)); - for (long i = checkpoint.generation - 1; i >= minGenerationToRecoverFrom; i--) { + for (long i = checkpoint.generation; i >= minGenerationToRecoverFrom; i--) { Path committedTranslogFile = location.resolve(getFilename(i)); if (Files.exists(committedTranslogFile) == false) { - throw new IllegalStateException("translog file doesn't exist with generation: " + i + " recovering from: " + - minGenerationToRecoverFrom + " checkpoint: " + checkpoint.generation + " - translog ids must be consecutive"); + throw new TranslogCorruptedException(committedTranslogFile.toString(), + "translog file doesn't exist with generation: " + i + " recovering from: " + minGenerationToRecoverFrom + + " checkpoint: " + checkpoint.generation + " - translog ids must be consecutive"); } - final TranslogReader reader = openReader(committedTranslogFile, - Checkpoint.read(location.resolve(getCommitCheckpointFileName(i)))); + final Checkpoint readerCheckpoint = i == checkpoint.generation ? 
checkpoint + : Checkpoint.read(location.resolve(getCommitCheckpointFileName(i))); + final TranslogReader reader = openReader(committedTranslogFile, readerCheckpoint); assert reader.getPrimaryTerm() <= primaryTermSupplier.getAsLong() : - "Primary terms go backwards; current term [" + primaryTermSupplier.getAsLong() + "]" + - "translog path [ " + committedTranslogFile + ", existing term [" + reader.getPrimaryTerm() + "]"; + "Primary terms go backwards; current term [" + primaryTermSupplier.getAsLong() + "] translog path [ " + + committedTranslogFile + ", existing term [" + reader.getPrimaryTerm() + "]"; foundTranslogs.add(reader); logger.debug("recovered local translog from checkpoint {}", checkpoint); } @@ -255,8 +255,9 @@ private ArrayList recoverFromFiles(Checkpoint checkpoint) throws if (Files.exists(commitCheckpoint)) { Checkpoint checkpointFromDisk = Checkpoint.read(commitCheckpoint); if (checkpoint.equals(checkpointFromDisk) == false) { - throw new IllegalStateException("Checkpoint file " + commitCheckpoint.getFileName() + - " already exists but has corrupted content expected: " + checkpoint + " but got: " + checkpointFromDisk); + throw new TranslogCorruptedException(commitCheckpoint.toString(), + "checkpoint file " + commitCheckpoint.getFileName() + " already exists but has corrupted content: expected " + + checkpoint + " but got " + checkpointFromDisk); } } else { copyCheckpointTo(commitCheckpoint); diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogStats.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogStats.java index e0b9817f12d16..5ef4b49933970 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogStats.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogStats.java @@ -21,7 +21,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.ToXContentFragment; @@ -29,7 +28,7 @@ import java.io.IOException; -public class TranslogStats implements Streamable, Writeable, ToXContentFragment { +public class TranslogStats implements Writeable, ToXContentFragment { private long translogSizeInBytes; private int numberOfOperations; @@ -122,11 +121,6 @@ public String toString() { return Strings.toString(this, true, true); } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(numberOfOperations); diff --git a/server/src/main/java/org/elasticsearch/index/warmer/WarmerStats.java b/server/src/main/java/org/elasticsearch/index/warmer/WarmerStats.java index a38eed9bdf38f..6544d4447bfe9 100644 --- a/server/src/main/java/org/elasticsearch/index/warmer/WarmerStats.java +++ b/server/src/main/java/org/elasticsearch/index/warmer/WarmerStats.java @@ -21,7 +21,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContentFragment; @@ -29,7 +28,7 @@ import 
java.io.IOException; -public class WarmerStats implements Streamable, Writeable, ToXContentFragment { +public class WarmerStats implements Writeable, ToXContentFragment { private long current; @@ -111,11 +110,6 @@ static final class Fields { static final String TOTAL_TIME_IN_MILLIS = "total_time_in_millis"; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVLong(current); diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java index fa1fa35588ec0..b167c30e32c6e 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java @@ -60,7 +60,6 @@ import org.elasticsearch.indices.flush.SyncedFlushService; import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.indices.store.IndicesStore; -import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData; import org.elasticsearch.plugins.MapperPlugin; import java.util.ArrayList; @@ -242,7 +241,6 @@ protected void configure() { bind(IndicesStore.class).asEagerSingleton(); bind(IndicesClusterStateService.class).asEagerSingleton(); bind(SyncedFlushService.class).asEagerSingleton(); - bind(TransportNodesListShardStoreMetaData.class).asEagerSingleton(); bind(TransportResyncReplicationAction.class).asEagerSingleton(); bind(PrimaryReplicaSyncer.class).asEagerSingleton(); } diff --git a/server/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java b/server/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java index fa52c88c02bbf..aa9e880180327 100644 --- a/server/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java +++ b/server/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java @@ -25,7 +25,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.Index; @@ -55,12 +55,26 @@ /** * Global information on indices stats running on a specific node. 
*/ -public class NodeIndicesStats implements Streamable, ToXContentFragment { +public class NodeIndicesStats implements Writeable, ToXContentFragment { private CommonStats stats; private Map<Index, List<IndexShardStats>> statsByShard; - NodeIndicesStats() { + public NodeIndicesStats(StreamInput in) throws IOException { + stats = new CommonStats(in); + if (in.readBoolean()) { + int entries = in.readVInt(); + statsByShard = new HashMap<>(); + for (int i = 0; i < entries; i++) { + Index index = new Index(in); + int indexShardListSize = in.readVInt(); + List<IndexShardStats> indexShardStats = new ArrayList<>(indexShardListSize); + for (int j = 0; j < indexShardListSize; j++) { + indexShardStats.add(new IndexShardStats(in)); + } + statsByShard.put(index, indexShardStats); + } + } } public NodeIndicesStats(CommonStats oldStats, Map<Index, List<IndexShardStats>> statsByShard) { @@ -158,30 +172,6 @@ public RecoveryStats getRecoveryStats() { return stats.getRecoveryStats(); } - public static NodeIndicesStats readIndicesStats(StreamInput in) throws IOException { - NodeIndicesStats stats = new NodeIndicesStats(); - stats.readFrom(in); - return stats; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - stats = new CommonStats(in); - if (in.readBoolean()) { - int entries = in.readVInt(); - statsByShard = new HashMap<>(); - for (int i = 0; i < entries; i++) { - Index index = new Index(in); - int indexShardListSize = in.readVInt(); - List<IndexShardStats> indexShardStats = new ArrayList<>(indexShardListSize); - for (int j = 0; j < indexShardListSize; j++) { - indexShardStats.add(IndexShardStats.readIndexShardStats(in)); - } - statsByShard.put(index, indexShardStats); - } - } - } - @Override public void writeTo(StreamOutput out) throws IOException { stats.writeTo(out);
diff --git a/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java b/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java index 48045e219252a..5797843161c59 100644 --- a/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java +++ b/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java @@ -325,17 +325,16 @@ public void checkParentLimit(long newBytesReserved, String label) throws Circuit message.append("/"); message.append(new ByteSizeValue(newBytesReserved)); message.append("]"); - } else { - message.append(", usages ["); - message.append(String.join(", ", - this.breakers.entrySet().stream().map(e -> { - final CircuitBreaker breaker = e.getValue(); - final long breakerUsed = (long)(breaker.getUsed() * breaker.getOverhead()); - return e.getKey() + "=" + breakerUsed + "/" + new ByteSizeValue(breakerUsed); - }) - .collect(Collectors.toList()))); - message.append("]"); } + message.append(", usages ["); + message.append(String.join(", ", + this.breakers.entrySet().stream().map(e -> { + final CircuitBreaker breaker = e.getValue(); + final long breakerUsed = (long)(breaker.getUsed() * breaker.getOverhead()); + return e.getKey() + "=" + breakerUsed + "/" + new ByteSizeValue(breakerUsed); + }) + .collect(Collectors.toList()))); + message.append("]"); // derive durability of a tripped parent breaker depending on whether the majority of memory tracked by // child circuit breakers is categorized as transient or permanent. CircuitBreaker.Durability durability = memoryUsed.transientChildUsage >= memoryUsed.permanentChildUsage ?
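A note on the pattern at work in the Streamable removals above (RecoveryStats, RefreshStats, DocsStats, IndexingStats, StoreStats, TranslogStats, WarmerStats, and NodeIndicesStats): the mutable readFrom(StreamInput) method is replaced by a constructor that takes a StreamInput, leaving only Writeable's writeTo. A minimal sketch of the target shape, using a hypothetical ExampleStats class that is not part of this change set:

    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.common.io.stream.Writeable;

    import java.io.IOException;

    public class ExampleStats implements Writeable {

        private final long total; // fields can become final once readFrom() is gone

        public ExampleStats(long total) {
            this.total = total;
        }

        // Deserialization happens in a constructor, so a half-initialized
        // instance is never observable and no no-arg constructor is needed.
        public ExampleStats(StreamInput in) throws IOException {
            this.total = in.readVLong();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeVLong(total);
        }
    }

The same constructor is what the ActionType conversions in this diff lean on: a method reference such as BulkByScrollResponse::new satisfies Writeable.Reader, which is why the newResponse() overrides of StreamableResponseActionType can be deleted.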
diff --git a/server/src/main/java/org/elasticsearch/indices/flush/ShardsSyncedFlushResult.java b/server/src/main/java/org/elasticsearch/indices/flush/ShardsSyncedFlushResult.java index b44feab6e603f..4748c41d4b3e7 100644 --- a/server/src/main/java/org/elasticsearch/indices/flush/ShardsSyncedFlushResult.java +++ b/server/src/main/java/org/elasticsearch/indices/flush/ShardsSyncedFlushResult.java @@ -21,7 +21,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; @@ -33,7 +33,7 @@ /** * Result for all copies of a shard */ -public class ShardsSyncedFlushResult implements Streamable { +public class ShardsSyncedFlushResult implements Writeable { private String failureReason; private Map shardResponses; private String syncId; @@ -41,7 +41,18 @@ public class ShardsSyncedFlushResult implements Streamable { // some shards may be unassigned, so we need this as state private int totalShards; - private ShardsSyncedFlushResult() { + public ShardsSyncedFlushResult(StreamInput in) throws IOException { + failureReason = in.readOptionalString(); + int numResponses = in.readInt(); + shardResponses = new HashMap<>(); + for (int i = 0; i < numResponses; i++) { + ShardRouting shardRouting = new ShardRouting(in); + SyncedFlushService.ShardSyncedFlushResponse response = SyncedFlushService.ShardSyncedFlushResponse.readSyncedFlushResponse(in); + shardResponses.put(shardRouting, response); + } + syncId = in.readOptionalString(); + shardId = new ShardId(in); + totalShards = in.readInt(); } public ShardId getShardId() { @@ -137,21 +148,6 @@ public ShardId shardId() { return shardId; } - @Override - public void readFrom(StreamInput in) throws IOException { - failureReason = in.readOptionalString(); - int numResponses = in.readInt(); - shardResponses = new HashMap<>(); - for (int i = 0; i < numResponses; i++) { - ShardRouting shardRouting = new ShardRouting(in); - SyncedFlushService.ShardSyncedFlushResponse response = SyncedFlushService.ShardSyncedFlushResponse.readSyncedFlushResponse(in); - shardResponses.put(shardRouting, response); - } - syncId = in.readOptionalString(); - shardId = new ShardId(in); - totalShards = in.readInt(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(failureReason); @@ -164,10 +160,4 @@ public void writeTo(StreamOutput out) throws IOException { shardId.writeTo(out); out.writeInt(totalShards); } - - public static ShardsSyncedFlushResult readShardsSyncedFlushResult(StreamInput in) throws IOException { - ShardsSyncedFlushResult shardsSyncedFlushResult = new ShardsSyncedFlushResult(); - shardsSyncedFlushResult.readFrom(in); - return shardsSyncedFlushResult; - } } diff --git a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java index a33d4cb43beeb..cd8584c5c35e4 100644 --- a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java @@ -93,11 +93,11 @@ public SyncedFlushService(IndicesService indicesService, this.clusterService = clusterService; this.transportService = transportService; this.indexNameExpressionResolver = indexNameExpressionResolver; - 
transportService.registerRequestHandler(PRE_SYNCED_FLUSH_ACTION_NAME, PreShardSyncedFlushRequest::new, ThreadPool.Names.FLUSH, + transportService.registerRequestHandler(PRE_SYNCED_FLUSH_ACTION_NAME, ThreadPool.Names.FLUSH, PreShardSyncedFlushRequest::new, new PreSyncedFlushTransportHandler()); - transportService.registerRequestHandler(SYNCED_FLUSH_ACTION_NAME, ShardSyncedFlushRequest::new, ThreadPool.Names.FLUSH, + transportService.registerRequestHandler(SYNCED_FLUSH_ACTION_NAME, ThreadPool.Names.FLUSH, ShardSyncedFlushRequest::new, new SyncedFlushTransportHandler()); - transportService.registerRequestHandler(IN_FLIGHT_OPS_ACTION_NAME, InFlightOpsRequest::new, ThreadPool.Names.SAME, + transportService.registerRequestHandler(IN_FLIGHT_OPS_ACTION_NAME, ThreadPool.Names.SAME, InFlightOpsRequest::new, new InFlightOpCountTransportHandler()); } @@ -282,7 +282,7 @@ private void reportSuccessWithExistingSyncId(ShardId shardId, final Map results = new HashMap<>(); for (final ShardRouting shard : shards) { if (preSyncResponses.containsKey(shard.currentNodeId())) { - results.put(shard, new ShardSyncedFlushResponse()); + results.put(shard, new ShardSyncedFlushResponse((String) null)); } } listener.onResponse(new ShardsSyncedFlushResult(shardId, existingSyncId, totalShards, results)); @@ -322,9 +322,7 @@ protected void getInflightOpsCount(final ShardId shardId, new TransportResponseHandler() { @Override public InFlightOpsResponse read(StreamInput in) throws IOException { - InFlightOpsResponse response = new InFlightOpsResponse(); - response.readFrom(in); - return response; + return new InFlightOpsResponse(in); } @Override @@ -402,9 +400,7 @@ void sendSyncRequests(final String syncId, new TransportResponseHandler() { @Override public ShardSyncedFlushResponse read(StreamInput in) throws IOException { - ShardSyncedFlushResponse response = new ShardSyncedFlushResponse(); - response.readFrom(in); - return response; + return new ShardSyncedFlushResponse(in); } @Override @@ -468,9 +464,7 @@ void sendPreSyncRequests(final List shards, new TransportResponseHandler() { @Override public PreSyncedFlushResponse read(StreamInput in) throws IOException { - PreSyncedFlushResponse response = new PreSyncedFlushResponse(); - response.readFrom(in); - return response; + return new PreSyncedFlushResponse(in); } @Override @@ -520,7 +514,7 @@ private ShardSyncedFlushResponse performSyncedFlush(ShardSyncedFlushRequest requ logger.trace("{} sync flush done. 
sync id [{}], result [{}]", request.shardId(), request.syncId(), result); switch (result) { case SUCCESS: - return new ShardSyncedFlushResponse(); + return new ShardSyncedFlushResponse((String) null); case COMMIT_MISMATCH: return new ShardSyncedFlushResponse("commit has changed"); case PENDING_OPERATIONS: @@ -543,7 +537,9 @@ private InFlightOpsResponse performInFlightOps(InFlightOpsRequest request) { public static final class PreShardSyncedFlushRequest extends TransportRequest { private ShardId shardId; - public PreShardSyncedFlushRequest() { + public PreShardSyncedFlushRequest(StreamInput in) throws IOException { + super(in); + this.shardId = new ShardId(in); } public PreShardSyncedFlushRequest(ShardId shardId) { @@ -563,12 +559,6 @@ public void writeTo(StreamOutput out) throws IOException { shardId.writeTo(out); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - this.shardId = new ShardId(in); - } - public ShardId shardId() { return shardId; } @@ -584,7 +574,11 @@ static final class PreSyncedFlushResponse extends TransportResponse { int numDocs; @Nullable String existingSyncId = null; - PreSyncedFlushResponse() { + PreSyncedFlushResponse(StreamInput in) throws IOException { + super(in); + commitId = new Engine.CommitId(in); + numDocs = in.readInt(); + existingSyncId = in.readOptionalString(); } PreSyncedFlushResponse(Engine.CommitId commitId, int numDocs, String existingSyncId) { @@ -593,14 +587,6 @@ static final class PreSyncedFlushResponse extends TransportResponse { this.existingSyncId = existingSyncId; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - commitId = new Engine.CommitId(in); - numDocs = in.readInt(); - existingSyncId = in.readOptionalString(); - } - @Override public void writeTo(StreamOutput out) throws IOException { commitId.writeTo(out); @@ -615,7 +601,11 @@ public static final class ShardSyncedFlushRequest extends TransportRequest { private Engine.CommitId expectedCommitId; private ShardId shardId; - public ShardSyncedFlushRequest() { + public ShardSyncedFlushRequest(StreamInput in) throws IOException { + super(in); + shardId = new ShardId(in); + expectedCommitId = new Engine.CommitId(in); + syncId = in.readString(); } public ShardSyncedFlushRequest(ShardId shardId, String syncId, Engine.CommitId expectedCommitId) { @@ -624,14 +614,6 @@ public ShardSyncedFlushRequest(ShardId shardId, String syncId, Engine.CommitId e this.syncId = syncId; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - shardId = new ShardId(in); - expectedCommitId = new Engine.CommitId(in); - syncId = in.readString(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -671,20 +653,15 @@ public static final class ShardSyncedFlushResponse extends TransportResponse { */ String failureReason; - public ShardSyncedFlushResponse() { - failureReason = null; + public ShardSyncedFlushResponse(StreamInput in) throws IOException { + super(in); + failureReason = in.readOptionalString(); } public ShardSyncedFlushResponse(String failureReason) { this.failureReason = failureReason; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - failureReason = in.readOptionalString(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(failureReason); @@ -707,9 +684,7 @@ public String toString() { } public static ShardSyncedFlushResponse 
readSyncedFlushResponse(StreamInput in) throws IOException { - ShardSyncedFlushResponse shardSyncedFlushResponse = new ShardSyncedFlushResponse(); - shardSyncedFlushResponse.readFrom(in); - return shardSyncedFlushResponse; + return new ShardSyncedFlushResponse(in); } } @@ -718,19 +693,15 @@ public static final class InFlightOpsRequest extends TransportRequest { private ShardId shardId; - public InFlightOpsRequest() { + public InFlightOpsRequest(StreamInput in) throws IOException { + super(in); + shardId = new ShardId(in); } public InFlightOpsRequest(ShardId shardId) { this.shardId = shardId; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - shardId = new ShardId(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -756,7 +727,9 @@ static final class InFlightOpsResponse extends TransportResponse { int opCount; - InFlightOpsResponse() { + InFlightOpsResponse(StreamInput in) throws IOException { + super(in); + opCount = in.readVInt(); } InFlightOpsResponse(int opCount) { @@ -764,12 +737,6 @@ static final class InFlightOpsResponse extends TransportResponse { this.opCount = opCount; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - opCount = in.readVInt(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(opCount); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/MultiFileTransfer.java b/server/src/main/java/org/elasticsearch/indices/recovery/MultiFileTransfer.java new file mode 100644 index 0000000000000..09366a38a9957 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/indices/recovery/MultiFileTransfer.java @@ -0,0 +1,210 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.indices.recovery; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.Assertions; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.util.concurrent.AsyncIOProcessor; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.index.seqno.LocalCheckpointTracker; +import org.elasticsearch.index.store.StoreFileMetaData; + +import java.io.Closeable; +import java.io.IOException; +import java.util.Iterator; +import java.util.List; +import java.util.function.Consumer; + +import static org.elasticsearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; + +/** + * File chunks are sent/requested sequentially by at most one thread at any time. 
However, the sender/requestor won't wait for the response + * before processing the next file chunk request, to reduce the recovery time especially on secure/compressed or high-latency communication. + * <p> + * The sender/requestor can send up to {@code maxConcurrentFileChunks} file chunk requests without waiting for responses. Since the recovery + * target can receive file chunks out of order, it has to buffer those file chunks in memory and only flush them to disk when there's no gap. + * To ensure the recovery target never buffers more than {@code maxConcurrentFileChunks} file chunks, we allow the sender/requestor to send + * only up to {@code maxConcurrentFileChunks} file chunk requests ahead of the last flushed (and acknowledged) file chunk. We leverage the local + * checkpoint tracker for this purpose: we generate a new sequence number and assign it to each file chunk request before sending, then mark + * that sequence number as processed when we receive a response for the corresponding file chunk request. With the local checkpoint tracker, + * we know the last acknowledged-and-flushed file chunk is the one whose {@code requestSeqId} equals the local checkpoint, because the + * recovery target can flush all file chunks up to the local checkpoint. + * <p>
+ * When the number of un-replied file chunk requests reaches the limit (i.e. the gap between the max_seq_no and the local checkpoint is + * greater than {@code maxConcurrentFileChunks}), the sending/requesting thread will abort its execution. That process will be resumed by + * one of the networking threads which receive/handle the responses of the current pending file chunk requests. This process will continue + * until all chunk requests are sent and responded to. + */ +abstract class MultiFileTransfer<Request extends MultiFileTransfer.ChunkRequest> implements Closeable { + private Status status = Status.PROCESSING; + private final Logger logger; + private final ActionListener<Void> listener; + private final LocalCheckpointTracker requestSeqIdTracker = new LocalCheckpointTracker(NO_OPS_PERFORMED, NO_OPS_PERFORMED); + private final AsyncIOProcessor<FileChunkResponseItem> processor; + private final int maxConcurrentFileChunks; + private StoreFileMetaData currentFile = null; + private final Iterator<StoreFileMetaData> remainingFiles; + private Tuple<StoreFileMetaData, Request> readAheadRequest = null; + + protected MultiFileTransfer(Logger logger, ThreadContext threadContext, ActionListener<Void> listener, + int maxConcurrentFileChunks, List<StoreFileMetaData> files) { + this.logger = logger; + this.maxConcurrentFileChunks = maxConcurrentFileChunks; + this.listener = listener; + this.processor = new AsyncIOProcessor<>(logger, maxConcurrentFileChunks, threadContext) { + @Override + protected void write(List<Tuple<FileChunkResponseItem, Consumer<Exception>>> items) { + handleItems(items); + } + }; + this.remainingFiles = files.iterator(); + } + + public final void start() { + addItem(UNASSIGNED_SEQ_NO, null, null); // put a dummy item to start the processor + } + + private void addItem(long requestSeqId, StoreFileMetaData md, Exception failure) { + processor.put(new FileChunkResponseItem(requestSeqId, md, failure), e -> { assert e == null : e; }); + } + + private void handleItems(List<Tuple<FileChunkResponseItem, Consumer<Exception>>> items) { + if (status != Status.PROCESSING) { + assert status == Status.FAILED : "must not receive any response after the transfer was completed"; + // These exceptions will be ignored as we record only the first failure, log them for debugging purposes. + items.stream().filter(item -> item.v1().failure != null).forEach(item -> + logger.debug(new ParameterizedMessage("failed to transfer a file chunk request {}", item.v1().md), item.v1().failure)); + return; + } + try { + for (Tuple<FileChunkResponseItem, Consumer<Exception>> item : items) { + final FileChunkResponseItem resp = item.v1(); + if (resp.requestSeqId == UNASSIGNED_SEQ_NO) { + continue; // not an actual item + } + requestSeqIdTracker.markSeqNoAsProcessed(resp.requestSeqId); + if (resp.failure != null) { + handleError(resp.md, resp.failure); + throw resp.failure; + } + } + while (requestSeqIdTracker.getMaxSeqNo() - requestSeqIdTracker.getProcessedCheckpoint() < maxConcurrentFileChunks) { + final Tuple<StoreFileMetaData, Request> request = readAheadRequest != null ? readAheadRequest : getNextRequest(); + readAheadRequest = null; + if (request == null) { + assert currentFile == null && remainingFiles.hasNext() == false; + if (requestSeqIdTracker.getMaxSeqNo() == requestSeqIdTracker.getProcessedCheckpoint()) { + onCompleted(null); + } + return; + } + final long requestSeqId = requestSeqIdTracker.generateSeqNo(); + sendChunkRequest(request.v2(), ActionListener.wrap( + r -> addItem(requestSeqId, request.v1(), null), + e -> addItem(requestSeqId, request.v1(), e))); + } + // While we are waiting for the responses, we can prepare the next request in advance + // so we can send it immediately when the responses arrive to reduce the transfer time.
+ if (readAheadRequest == null) { + readAheadRequest = getNextRequest(); + } + } catch (Exception e) { + onCompleted(e); + } + } + + private void onCompleted(Exception failure) { + if (Assertions.ENABLED && status != Status.PROCESSING) { + throw new AssertionError("invalid status: expected [" + Status.PROCESSING + "] actual [" + status + "]", failure); + } + status = failure == null ? Status.SUCCESS : Status.FAILED; + try { + IOUtils.close(failure, this); + } catch (Exception e) { + listener.onFailure(e); + return; + } + listener.onResponse(null); + } + + private Tuple<StoreFileMetaData, Request> getNextRequest() throws Exception { + try { + if (currentFile == null) { + if (remainingFiles.hasNext()) { + currentFile = remainingFiles.next(); + onNewFile(currentFile); + } else { + return null; + } + } + final StoreFileMetaData md = currentFile; + final Request request = nextChunkRequest(md); + if (request.lastChunk()) { + currentFile = null; + } + return Tuple.tuple(md, request); + } catch (Exception e) { + handleError(currentFile, e); + throw e; + } + } + + /** + * This method is called when starting sending/requesting a new file. Subclasses should override + * this method to reset the file offset or close the previous file and open a new file if needed. + */ + protected abstract void onNewFile(StoreFileMetaData md) throws IOException; + + protected abstract Request nextChunkRequest(StoreFileMetaData md) throws IOException; + + protected abstract void sendChunkRequest(Request request, ActionListener<Void> listener); + + protected abstract void handleError(StoreFileMetaData md, Exception e) throws Exception; + + private static class FileChunkResponseItem { + final long requestSeqId; + final StoreFileMetaData md; + final Exception failure; + + FileChunkResponseItem(long requestSeqId, StoreFileMetaData md, Exception failure) { + this.requestSeqId = requestSeqId; + this.md = md; + this.failure = failure; + } + } + + protected interface ChunkRequest { + /** + * @return {@code true} if this chunk request is the last chunk of the current file + */ + boolean lastChunk(); + } + + private enum Status { + PROCESSING, + SUCCESS, + FAILED + } +} diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java index f53e8edecd9e6..631a878a58d31 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java @@ -71,7 +71,7 @@ public PeerRecoverySourceService(TransportService transportService, IndicesServi this.transportService = transportService; this.indicesService = indicesService; this.recoverySettings = recoverySettings; - transportService.registerRequestHandler(Actions.START_RECOVERY, StartRecoveryRequest::new, ThreadPool.Names.GENERIC, + transportService.registerRequestHandler(Actions.START_RECOVERY, ThreadPool.Names.GENERIC, StartRecoveryRequest::new, new StartRecoveryTransportRequestHandler()); } @@ -176,7 +176,7 @@ private RecoverySourceHandler createRecoverySourceHandler(StartRecoveryRequest r final RemoteRecoveryTargetHandler recoveryTarget = new RemoteRecoveryTargetHandler(request.recoveryId(), request.shardId(), transportService, request.targetNode(), recoverySettings, throttleTime -> shard.recoveryStats().addThrottleTime(throttleTime)); - handler = new RecoverySourceHandler(shard, recoveryTarget, request, + handler = new RecoverySourceHandler(shard, recoveryTarget,
shard.getThreadPool(), request, Math.toIntExact(recoverySettings.getChunkSize().getBytes()), recoverySettings.getMaxConcurrentFileChunks()); return handler; } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index 92f1558d71eca..8a11cdf5ec961 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -112,22 +112,22 @@ public PeerRecoveryTargetService(ThreadPool threadPool, TransportService transpo this.clusterService = clusterService; this.onGoingRecoveries = new RecoveriesCollection(logger, threadPool); - transportService.registerRequestHandler(Actions.FILES_INFO, RecoveryFilesInfoRequest::new, ThreadPool.Names.GENERIC, new - FilesInfoRequestHandler()); - transportService.registerRequestHandler(Actions.FILE_CHUNK, RecoveryFileChunkRequest::new, ThreadPool.Names.GENERIC, new - FileChunkTransportRequestHandler()); + transportService.registerRequestHandler(Actions.FILES_INFO, ThreadPool.Names.GENERIC, RecoveryFilesInfoRequest::new, + new FilesInfoRequestHandler()); + transportService.registerRequestHandler(Actions.FILE_CHUNK, ThreadPool.Names.GENERIC, RecoveryFileChunkRequest::new, + new FileChunkTransportRequestHandler()); transportService.registerRequestHandler(Actions.CLEAN_FILES, ThreadPool.Names.GENERIC, RecoveryCleanFilesRequest::new, new CleanFilesRequestHandler()); transportService.registerRequestHandler(Actions.PREPARE_TRANSLOG, ThreadPool.Names.GENERIC, RecoveryPrepareForTranslogOperationsRequest::new, new PrepareForTranslogOperationsRequestHandler()); transportService.registerRequestHandler(Actions.TRANSLOG_OPS, ThreadPool.Names.GENERIC, RecoveryTranslogOperationsRequest::new, new TranslogOperationsRequestHandler()); - transportService.registerRequestHandler(Actions.FINALIZE, RecoveryFinalizeRecoveryRequest::new, ThreadPool.Names.GENERIC, new - FinalizeRecoveryRequestHandler()); + transportService.registerRequestHandler(Actions.FINALIZE, ThreadPool.Names.GENERIC, RecoveryFinalizeRecoveryRequest::new, + new FinalizeRecoveryRequestHandler()); transportService.registerRequestHandler( Actions.HANDOFF_PRIMARY_CONTEXT, - RecoveryHandoffPrimaryContextRequest::new, ThreadPool.Names.GENERIC, + ThreadPool.Names.GENERIC, RecoveryHandoffPrimaryContextRequest::new, new HandoffPrimaryContextRequestHandler()); } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java index 59480ccbe4233..704c9b55d145a 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java @@ -41,7 +41,21 @@ public final class RecoveryFileChunkRequest extends TransportRequest { private int totalTranslogOps; - public RecoveryFileChunkRequest() { + public RecoveryFileChunkRequest(StreamInput in) throws IOException { + super(in); + recoveryId = in.readLong(); + shardId = new ShardId(in); + String name = in.readString(); + position = in.readVLong(); + long length = in.readVLong(); + String checksum = in.readString(); + content = in.readBytesReference(); + Version writtenBy = Lucene.parseVersionLenient(in.readString(), null); + assert writtenBy != null; + metaData = new StoreFileMetaData(name, length, checksum, writtenBy); +
lastChunk = in.readBoolean(); + totalTranslogOps = in.readVInt(); + sourceThrottleTimeInNanos = in.readLong(); } public RecoveryFileChunkRequest(long recoveryId, ShardId shardId, StoreFileMetaData metaData, long position, BytesReference content, @@ -92,24 +106,6 @@ public long sourceThrottleTimeInNanos() { return sourceThrottleTimeInNanos; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - recoveryId = in.readLong(); - shardId = new ShardId(in); - String name = in.readString(); - position = in.readVLong(); - long length = in.readVLong(); - String checksum = in.readString(); - content = in.readBytesReference(); - Version writtenBy = Lucene.parseVersionLenient(in.readString(), null); - assert writtenBy != null; - metaData = new StoreFileMetaData(name, length, checksum, writtenBy); - lastChunk = in.readBoolean(); - totalTranslogOps = in.readVInt(); - sourceThrottleTimeInNanos = in.readLong(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryFilesInfoRequest.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryFilesInfoRequest.java index b1fdef06ed518..7adc638b5c702 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryFilesInfoRequest.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryFilesInfoRequest.java @@ -40,31 +40,8 @@ public class RecoveryFilesInfoRequest extends TransportRequest { int totalTranslogOps; - public RecoveryFilesInfoRequest() { - } - - RecoveryFilesInfoRequest(long recoveryId, ShardId shardId, List<String> phase1FileNames, List<Long> phase1FileSizes, - List<String> phase1ExistingFileNames, List<Long> phase1ExistingFileSizes, int totalTranslogOps) { - this.recoveryId = recoveryId; - this.shardId = shardId; - this.phase1FileNames = phase1FileNames; - this.phase1FileSizes = phase1FileSizes; - this.phase1ExistingFileNames = phase1ExistingFileNames; - this.phase1ExistingFileSizes = phase1ExistingFileSizes; - this.totalTranslogOps = totalTranslogOps; - } - - public long recoveryId() { - return this.recoveryId; - } - - public ShardId shardId() { - return shardId; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); + public RecoveryFilesInfoRequest(StreamInput in) throws IOException { + super(in); recoveryId = in.readLong(); shardId = new ShardId(in); int size = in.readVInt(); @@ -93,6 +70,25 @@ public void readFrom(StreamInput in) throws IOException { totalTranslogOps = in.readVInt(); } + RecoveryFilesInfoRequest(long recoveryId, ShardId shardId, List<String> phase1FileNames, List<Long> phase1FileSizes, + List<String> phase1ExistingFileNames, List<Long> phase1ExistingFileSizes, int totalTranslogOps) { + this.recoveryId = recoveryId; + this.shardId = shardId; + this.phase1FileNames = phase1FileNames; + this.phase1FileSizes = phase1FileSizes; + this.phase1ExistingFileNames = phase1ExistingFileNames; + this.phase1ExistingFileSizes = phase1ExistingFileSizes; + this.totalTranslogOps = totalTranslogOps; + } + + public long recoveryId() { + return this.recoveryId; + } + + public ShardId shardId() { + return shardId; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java index 82036338be7ca..f7c302630b1ff 100644 ---
a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java @@ -32,7 +32,11 @@ public class RecoveryFinalizeRecoveryRequest extends TransportRequest { private ShardId shardId; private long globalCheckpoint; - public RecoveryFinalizeRecoveryRequest() { + public RecoveryFinalizeRecoveryRequest(StreamInput in) throws IOException { + super(in); + recoveryId = in.readLong(); + shardId = new ShardId(in); + globalCheckpoint = in.readZLong(); } RecoveryFinalizeRecoveryRequest(final long recoveryId, final ShardId shardId, final long globalCheckpoint) { @@ -53,14 +57,6 @@ public long globalCheckpoint() { return globalCheckpoint; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - recoveryId = in.readLong(); - shardId = new ShardId(in); - globalCheckpoint = in.readZLong(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryHandoffPrimaryContextRequest.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryHandoffPrimaryContextRequest.java index bccb917646fe7..62382d608508a 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryHandoffPrimaryContextRequest.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryHandoffPrimaryContextRequest.java @@ -39,7 +39,11 @@ class RecoveryHandoffPrimaryContextRequest extends TransportRequest { /** * Initialize an empty request (used to serialize into when reading from a stream). */ - RecoveryHandoffPrimaryContextRequest() { + RecoveryHandoffPrimaryContextRequest(StreamInput in) throws IOException { + super(in); + recoveryId = in.readLong(); + shardId = new ShardId(in); + primaryContext = new ReplicationTracker.PrimaryContext(in); } /** @@ -68,14 +72,6 @@ ReplicationTracker.PrimaryContext primaryContext() { return primaryContext; } - @Override - public void readFrom(final StreamInput in) throws IOException { - super.readFrom(in); - recoveryId = in.readLong(); - shardId = new ShardId(in); - primaryContext = new ReplicationTracker.PrimaryContext(in); - } - @Override public void writeTo(final StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java index 6e2557176a82e..d632eff6f4b95 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java @@ -41,7 +41,7 @@ class RecoveryPrepareForTranslogOperationsRequest extends TransportRequest { } RecoveryPrepareForTranslogOperationsRequest(StreamInput in) throws IOException { - super.readFrom(in); + super(in); recoveryId = in.readLong(); shardId = new ShardId(in); totalTranslogOps = in.readVInt(); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 3ae9598124b05..e1353ecb52fac 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ 
b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -37,7 +37,7 @@ import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.StopWatch; import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.logging.Loggers; @@ -49,7 +49,6 @@ import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.RecoveryEngineException; -import org.elasticsearch.index.seqno.LocalCheckpointTracker; import org.elasticsearch.index.seqno.RetentionLeases; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexShard; @@ -61,11 +60,12 @@ import org.elasticsearch.index.translog.Translog; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteTransportException; +import org.elasticsearch.transport.Transports; import java.io.Closeable; import java.io.IOException; -import java.io.InputStream; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.Comparator; import java.util.List; @@ -73,13 +73,10 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import java.util.function.IntSupplier; import java.util.stream.StreamSupport; -import static org.elasticsearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; - /** * RecoverySourceHandler handles the three phases of shard recovery, which is * everything relating to copying the segment files as well as sending translog * ... @@ -102,12 +99,15 @@ public class RecoverySourceHandler { private final int chunkSizeInBytes; private final RecoveryTargetHandler recoveryTarget; private final int maxConcurrentFileChunks; + private final ThreadPool threadPool; private final CancellableThreads cancellableThreads = new CancellableThreads(); + private final List<Closeable> resources = new CopyOnWriteArrayList<>(); - public RecoverySourceHandler(final IndexShard shard, RecoveryTargetHandler recoveryTarget, final StartRecoveryRequest request, - final int fileChunkSizeInBytes, final int maxConcurrentFileChunks) { + public RecoverySourceHandler(IndexShard shard, RecoveryTargetHandler recoveryTarget, ThreadPool threadPool, + StartRecoveryRequest request, int fileChunkSizeInBytes, int maxConcurrentFileChunks) { this.shard = shard; this.recoveryTarget = recoveryTarget; + this.threadPool = threadPool; this.request = request; this.shardId = this.request.shardId().id(); this.logger = Loggers.getLogger(getClass(), request.shardId(), "recover to " + request.targetNode().getName()); @@ -123,7 +123,6 @@ public StartRecoveryRequest getRequest() { * performs the recovery from the local engine to the target */ public void recoverToTarget(ActionListener<RecoveryResponse> listener) { - final List<Closeable> resources = new CopyOnWriteArrayList<>(); final Closeable releaseResources = () -> IOUtils.close(resources); final ActionListener<RecoveryResponse> wrappedListener = ActionListener.notifyOnce(listener); try { @@ -404,15 +403,17 @@ void phase1(IndexCommit snapshot, long globalCheckpoint, IntSupplier translogOps phase1FileNames.size(), new ByteSizeValue(totalSizeInBytes), phase1ExistingFileNames.size(), new
ByteSizeValue(existingTotalSizeInBytes)); final StepListener<Void> sendFileInfoStep = new StepListener<>(); + final StepListener<Void> sendFilesStep = new StepListener<>(); final StepListener<Void> cleanFilesStep = new StepListener<>(); cancellableThreads.execute(() -> recoveryTarget.receiveFileInfo(phase1FileNames, phase1FileSizes, phase1ExistingFileNames, phase1ExistingFileSizes, translogOps.getAsInt(), sendFileInfoStep)); - sendFileInfoStep.whenComplete(r -> { - sendFiles(store, phase1Files.toArray(new StoreFileMetaData[0]), translogOps); - cleanFiles(store, recoverySourceMetadata, translogOps, globalCheckpoint, cleanFilesStep); - }, listener::onFailure); + sendFileInfoStep.whenComplete(r -> + sendFiles(store, phase1Files.toArray(new StoreFileMetaData[0]), translogOps, sendFilesStep), listener::onFailure); + + sendFilesStep.whenComplete(r -> + cleanFiles(store, recoverySourceMetadata, translogOps, globalCheckpoint, cleanFilesStep), listener::onFailure); final long totalSize = totalSizeInBytes; final long existingTotalSize = existingTotalSizeInBytes; @@ -571,6 +572,7 @@ private void sendBatch( final long mappingVersionOnPrimary, final ActionListener<Long> listener) throws IOException { assert ThreadPool.assertCurrentMethodIsNotCalledRecursively(); + assert Transports.assertNotTransportThread(RecoverySourceHandler.this + "[send translog]"); final List<Translog.Operation> operations = nextBatch.get(); // send the leftover operations or if no operations were sent, request the target to respond with its local checkpoint if (operations.isEmpty() == false || firstBatch) { @@ -669,54 +671,80 @@ public String toString() { '}'; } - void sendFiles(Store store, StoreFileMetaData[] files, IntSupplier translogOps) throws Exception { + private static class FileChunk implements MultiFileTransfer.ChunkRequest { + final StoreFileMetaData md; + final BytesReference content; + final long position; + final boolean lastChunk; + + FileChunk(StoreFileMetaData md, BytesReference content, long position, boolean lastChunk) { + this.md = md; + this.content = content; + this.position = position; + this.lastChunk = lastChunk; + } + + @Override + public boolean lastChunk() { + return lastChunk; + } + } + + void sendFiles(Store store, StoreFileMetaData[] files, IntSupplier translogOps, ActionListener<Void> listener) { ArrayUtil.timSort(files, Comparator.comparingLong(StoreFileMetaData::length)); // send smallest first - final LocalCheckpointTracker requestSeqIdTracker = new LocalCheckpointTracker(NO_OPS_PERFORMED, NO_OPS_PERFORMED); - final AtomicReference<Tuple<StoreFileMetaData, Exception>> error = new AtomicReference<>(); - final byte[] buffer = new byte[chunkSizeInBytes]; - for (final StoreFileMetaData md : files) { - if (error.get() != null) { - break; - } - try (IndexInput indexInput = store.directory().openInput(md.name(), IOContext.READONCE); - InputStream in = new InputStreamIndexInput(indexInput, md.length())) { - long position = 0; - int bytesRead; - while ((bytesRead = in.read(buffer, 0, buffer.length)) != -1) { - final BytesArray content = new BytesArray(buffer, 0, bytesRead); - final boolean lastChunk = position + content.length() == md.length(); - final long requestSeqId = requestSeqIdTracker.generateSeqNo(); - cancellableThreads.execute( - () -> requestSeqIdTracker.waitForProcessedOpsToComplete(requestSeqId - maxConcurrentFileChunks)); + + final MultiFileTransfer<FileChunk> multiFileSender = + new MultiFileTransfer<>(logger, threadPool.getThreadContext(), listener, maxConcurrentFileChunks, Arrays.asList(files)) { + + final byte[] buffer = new byte[chunkSizeInBytes]; + InputStreamIndexInput
currentInput = null; + long offset = 0; + + @Override + protected void onNewFile(StoreFileMetaData md) throws IOException { + offset = 0; + IOUtils.close(currentInput, () -> currentInput = null); + final IndexInput indexInput = store.directory().openInput(md.name(), IOContext.READONCE); + currentInput = new InputStreamIndexInput(indexInput, md.length()) { + @Override + public void close() throws IOException { + IOUtils.close(indexInput, super::close); // InputStreamIndexInput's close is a noop + } + }; + } + + @Override + protected FileChunk nextChunkRequest(StoreFileMetaData md) throws IOException { + assert Transports.assertNotTransportThread("read file chunk"); cancellableThreads.checkForCancel(); - if (error.get() != null) { - break; + final int bytesRead = currentInput.read(buffer); + if (bytesRead == -1) { + throw new CorruptIndexException("file truncated; length=" + md.length() + " offset=" + offset, md.name()); } - final long requestFilePosition = position; - cancellableThreads.executeIO(() -> - recoveryTarget.writeFileChunk(md, requestFilePosition, content, lastChunk, translogOps.getAsInt(), - ActionListener.wrap( - r -> requestSeqIdTracker.markSeqNoAsProcessed(requestSeqId), - e -> { - error.compareAndSet(null, Tuple.tuple(md, e)); - requestSeqIdTracker.markSeqNoAsProcessed(requestSeqId); - } - ))); - position += content.length(); + final boolean lastChunk = offset + bytesRead == md.length(); + final FileChunk chunk = new FileChunk(md, new BytesArray(buffer, 0, bytesRead), offset, lastChunk); + offset += bytesRead; + return chunk; } - } catch (Exception e) { - error.compareAndSet(null, Tuple.tuple(md, e)); - break; - } - } - // When we terminate exceptionally, we don't wait for the outstanding requests as we don't use their results anyway. - // This allows us to end quickly and eliminate the complexity of handling requestSeqIds in case of error. 
- if (error.get() == null) { - cancellableThreads.execute(() -> requestSeqIdTracker.waitForProcessedOpsToComplete(requestSeqIdTracker.getMaxSeqNo())); - } - if (error.get() != null) { - handleErrorOnSendFiles(store, error.get().v2(), new StoreFileMetaData[]{error.get().v1()}); - } + + @Override + protected void sendChunkRequest(FileChunk request, ActionListener<Void> listener) { + cancellableThreads.execute(() -> recoveryTarget.writeFileChunk( + request.md, request.position, request.content, request.lastChunk, translogOps.getAsInt(), listener)); + } + + @Override + protected void handleError(StoreFileMetaData md, Exception e) throws Exception { + handleErrorOnSendFiles(store, e, new StoreFileMetaData[]{md}); + } + + @Override + public void close() throws IOException { + IOUtils.close(currentInput, () -> currentInput = null); + } + }; + resources.add(multiFileSender); + multiFileSender.start(); } private void cleanFiles(Store store, Store.MetadataSnapshot sourceMetadata, IntSupplier translogOps, @@ -740,6 +768,7 @@ private void cleanFiles(Store store, Store.MetadataSnapshot sourceMetadata, IntS private void handleErrorOnSendFiles(Store store, Exception e, StoreFileMetaData[] mds) throws Exception { final IOException corruptIndexException = ExceptionsHelper.unwrapCorruption(e); + assert Transports.assertNotTransportThread(RecoverySourceHandler.this + "[handle error on send/clean files]"); if (corruptIndexException != null) { Exception localException = null; for (StoreFileMetaData md : mds) { @@ -763,9 +792,8 @@ private void handleErrorOnSendFiles(Store store, Exception e, StoreFileMetaData[ shardId, request.targetNode(), mds), corruptIndexException); throw remoteException; } - } else { - throw e; } + throw e; } protected void failEngine(IOException cause) { diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java index d29456ec42bed..eacbcd37a958c 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ -45,7 +44,7 @@ /** * Keeps track of state related to shard recovery.
*/ -public class RecoveryState implements ToXContentFragment, Streamable, Writeable { +public class RecoveryState implements ToXContentFragment, Writeable { public enum Stage { INIT((byte) 0), @@ -253,11 +252,6 @@ public static RecoveryState readRecoveryState(StreamInput in) throws IOException return new RecoveryState(in); } - @Override - public synchronized void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsRequest.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsRequest.java index ca3d85de419b9..69d5b1961299b 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsRequest.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsRequest.java @@ -98,7 +98,7 @@ long mappingVersionOnPrimary() { } RecoveryTranslogOperationsRequest(StreamInput in) throws IOException { - super.readFrom(in); + super(in); recoveryId = in.readLong(); shardId = new ShardId(in); operations = Translog.readOperations(in, "recovery"); @@ -128,8 +128,4 @@ public void writeTo(StreamOutput out) throws IOException { } } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryWaitForClusterStateRequest.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryWaitForClusterStateRequest.java index d8ac7e59d73d1..ba39982abbe32 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryWaitForClusterStateRequest.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryWaitForClusterStateRequest.java @@ -31,7 +31,11 @@ public class RecoveryWaitForClusterStateRequest extends TransportRequest { private ShardId shardId; private long clusterStateVersion; - public RecoveryWaitForClusterStateRequest() { + public RecoveryWaitForClusterStateRequest(StreamInput in) throws IOException { + super(in); + recoveryId = in.readLong(); + shardId = new ShardId(in); + clusterStateVersion = in.readVLong(); } RecoveryWaitForClusterStateRequest(long recoveryId, ShardId shardId, long clusterStateVersion) { @@ -52,14 +56,6 @@ public long clusterStateVersion() { return clusterStateVersion; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - recoveryId = in.readLong(); - shardId = new ShardId(in); - clusterStateVersion = in.readVLong(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java index bb5457c1a3dca..f6f8baedd9d13 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java @@ -181,7 +181,7 @@ public void writeFileChunk(StoreFileMetaData fileMetaData, long position, BytesR * would be in to restart file copy again (new deltas) if we have too many translog ops are piling up.
*/ throttleTimeInNanos), fileChunkRequestOptions, new ActionListenerResponseHandler<>( - ActionListener.map(listener, r -> null), in -> TransportResponse.Empty.INSTANCE)); + ActionListener.map(listener, r -> null), in -> TransportResponse.Empty.INSTANCE, ThreadPool.Names.GENERIC)); } } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java b/server/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java index 4ec20d17ac5be..19b87ac09501e 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java @@ -43,7 +43,16 @@ public class StartRecoveryRequest extends TransportRequest { private boolean primaryRelocation; private long startingSeqNo; - public StartRecoveryRequest() { + public StartRecoveryRequest(StreamInput in) throws IOException { + super(in); + recoveryId = in.readLong(); + shardId = new ShardId(in); + targetAllocationId = in.readString(); + sourceNode = new DiscoveryNode(in); + targetNode = new DiscoveryNode(in); + metadataSnapshot = new Store.MetadataSnapshot(in); + primaryRelocation = in.readBoolean(); + startingSeqNo = in.readLong(); } /** @@ -110,19 +119,6 @@ public long startingSeqNo() { return startingSeqNo; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - recoveryId = in.readLong(); - shardId = new ShardId(in); - targetAllocationId = in.readString(); - sourceNode = new DiscoveryNode(in); - targetNode = new DiscoveryNode(in); - metadataSnapshot = new Store.MetadataSnapshot(in); - primaryRelocation = in.readBoolean(); - startingSeqNo = in.readLong(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java b/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java index d56f9818183d6..0be67340a8046 100644 --- a/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java +++ b/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java @@ -100,7 +100,7 @@ public IndicesStore(Settings settings, IndicesService indicesService, this.clusterService = clusterService; this.transportService = transportService; this.threadPool = threadPool; - transportService.registerRequestHandler(ACTION_SHARD_EXISTS, ShardActiveRequest::new, ThreadPool.Names.SAME, + transportService.registerRequestHandler(ACTION_SHARD_EXISTS, ThreadPool.Names.SAME, ShardActiveRequest::new, new ShardActiveRequestHandler()); this.deleteShardTimeout = INDICES_STORE_DELETE_SHARD_TIMEOUT.get(settings); // Doesn't make sense to delete shards on non-data nodes @@ -397,7 +397,12 @@ private static class ShardActiveRequest extends TransportRequest { private String indexUUID; private ShardId shardId; - ShardActiveRequest() { + ShardActiveRequest(StreamInput in) throws IOException { + super(in); + clusterName = new ClusterName(in); + indexUUID = in.readString(); + shardId = new ShardId(in); + timeout = new TimeValue(in.readLong(), TimeUnit.MILLISECONDS); } ShardActiveRequest(ClusterName clusterName, String indexUUID, ShardId shardId, TimeValue timeout) { @@ -407,15 +412,6 @@ private static class ShardActiveRequest extends TransportRequest { this.timeout = timeout; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - clusterName = new ClusterName(in); - indexUUID = in.readString(); - shardId = new 
ShardId(in); - timeout = new TimeValue(in.readLong(), TimeUnit.MILLISECONDS); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java b/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java index 20633d24ba7eb..29589d7f53a20 100644 --- a/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java +++ b/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java @@ -21,7 +21,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.nodes.BaseNodeRequest; @@ -36,12 +36,11 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.gateway.AsyncShardFetch; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.IndexShard; @@ -62,11 +61,10 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesAction<Request, NodesStoreFilesMetaData, NodeRequest, NodeStoreFilesMetaData> - implements AsyncShardFetch.Lister<NodesStoreFilesMetaData, NodeStoreFilesMetaData> { public static final String ACTION_NAME = "internal:cluster/nodes/indices/shard/store"; + public static final ActionType<NodesStoreFilesMetaData> TYPE = new ActionType<>(ACTION_NAME, NodesStoreFilesMetaData::new); private final Settings settings; private final IndicesService indicesService; @@ -86,19 +84,14 @@ public TransportNodesListShardStoreMetaData(Settings settings, ThreadPool thread this.namedXContentRegistry = namedXContentRegistry; } - @Override - public void list(ShardId shardId, DiscoveryNode[] nodes, ActionListener<NodesStoreFilesMetaData> listener) { - execute(new Request(shardId, nodes), listener); - } - @Override protected NodeRequest newNodeRequest(Request request) { return new NodeRequest(request); } @Override - protected NodeStoreFilesMetaData newNodeResponse() { - return new NodeStoreFilesMetaData(); + protected NodeStoreFilesMetaData newNodeResponse(StreamInput in) throws IOException { + return new NodeStoreFilesMetaData(in); } @Override @@ -173,11 +166,13 @@ private StoreFilesMetaData listStoreMetaData(ShardId shardId) throws IOException } } - public static class StoreFilesMetaData implements Iterable<StoreFileMetaData>, Streamable { + public static class StoreFilesMetaData implements Iterable<StoreFileMetaData>, Writeable { private ShardId shardId; Store.MetadataSnapshot metadataSnapshot; - StoreFilesMetaData() { + public StoreFilesMetaData(StreamInput in) throws IOException { + this.shardId = new ShardId(in); + this.metadataSnapshot = new Store.MetadataSnapshot(in); } public StoreFilesMetaData(ShardId shardId, Store.MetadataSnapshot metadataSnapshot) { @@ -206,18 +201,6 @@ public StoreFileMetaData file(String name) { return metadataSnapshot.asMap().get(name); } - public static StoreFilesMetaData readStoreFilesMetaData(StreamInput in) throws
IOException { - StoreFilesMetaData md = new StoreFilesMetaData(); - md.readFrom(in); - return md; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - shardId = new ShardId(in); - this.metadataSnapshot = new Store.MetadataSnapshot(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { shardId.writeTo(out); @@ -245,7 +228,9 @@ public static class Request extends BaseNodesRequest<Request> { private ShardId shardId; - public Request() { + public Request(StreamInput in) throws IOException { + super(in); + shardId = new ShardId(in); } public Request(ShardId shardId, DiscoveryNode[] nodes) { @@ -253,12 +238,6 @@ public Request(ShardId shardId, DiscoveryNode[] nodes) { this.shardId = shardId; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - shardId = new ShardId(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -268,6 +247,10 @@ public void writeTo(StreamOutput out) throws IOException { public static class NodesStoreFilesMetaData extends BaseNodesResponse<NodeStoreFilesMetaData> { + public NodesStoreFilesMetaData(StreamInput in) throws IOException { + super(in); + } + public NodesStoreFilesMetaData(ClusterName clusterName, List<NodeStoreFilesMetaData> nodes, List<FailedNodeException> failures) { super(clusterName, nodes, failures); } @@ -279,7 +262,7 @@ protected List<NodeStoreFilesMetaData> readNodesFrom(StreamInput in) throws IOEx @Override protected void writeNodesTo(StreamOutput out, List<NodeStoreFilesMetaData> nodes) throws IOException { - out.writeStreamableList(nodes); + out.writeList(nodes); } } @@ -288,19 +271,15 @@ public static class NodeRequest extends BaseNodeRequest { private ShardId shardId; - public NodeRequest() { + public NodeRequest(StreamInput in) throws IOException { + super(in); + shardId = new ShardId(in); } NodeRequest(TransportNodesListShardStoreMetaData.Request request) { this.shardId = request.shardId; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - shardId = new ShardId(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -312,7 +291,9 @@ public static class NodeStoreFilesMetaData extends BaseNodeResponse { private StoreFilesMetaData storeFilesMetaData; - NodeStoreFilesMetaData() { + public NodeStoreFilesMetaData(StreamInput in) throws IOException { + super(in); + storeFilesMetaData = new StoreFilesMetaData(in); } public NodeStoreFilesMetaData(DiscoveryNode node, StoreFilesMetaData storeFilesMetaData) { @@ -325,15 +306,7 @@ public StoreFilesMetaData storeFilesMetaData() { } public static NodeStoreFilesMetaData readListShardStoreNodeOperationResponse(StreamInput in) throws IOException { - NodeStoreFilesMetaData resp = new NodeStoreFilesMetaData(); - resp.readFrom(in); - return resp; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - storeFilesMetaData = StoreFilesMetaData.readStoreFilesMetaData(in); + return new NodeStoreFilesMetaData(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 119e3a095d8e5..cb0524d211201 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -28,7 +28,6 @@ import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.action.ActionModule; import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.admin.cluster.snapshots.status.TransportNodesSnapshotsStatus; import
org.elasticsearch.action.search.SearchExecutionStatsCollector; import org.elasticsearch.action.search.SearchPhaseController; import org.elasticsearch.action.search.SearchTransportService; @@ -494,8 +493,6 @@ protected Node( clusterModule.getIndexNameExpressionResolver(), repositoryService, threadPool); SnapshotShardsService snapshotShardsService = new SnapshotShardsService(settings, clusterService, snapshotsService, threadPool, transportService, indicesService, actionModule.getActionFilters(), clusterModule.getIndexNameExpressionResolver()); - TransportNodesSnapshotsStatus nodesSnapshotsStatus = new TransportNodesSnapshotsStatus(threadPool, clusterService, - transportService, snapshotShardsService, actionModule.getActionFilters()); RestoreService restoreService = new RestoreService(clusterService, repositoryService, clusterModule.getAllocationService(), metaDataCreateIndexService, metaDataIndexUpgradeService, clusterService.getClusterSettings()); @@ -581,7 +578,6 @@ protected Node( b.bind(RepositoriesService.class).toInstance(repositoryService); b.bind(SnapshotsService.class).toInstance(snapshotsService); b.bind(SnapshotShardsService.class).toInstance(snapshotShardsService); - b.bind(TransportNodesSnapshotsStatus.class).toInstance(nodesSnapshotsStatus); b.bind(RestoreService.class).toInstance(restoreService); b.bind(RerouteService.class).toInstance(rerouteService); } diff --git a/server/src/main/java/org/elasticsearch/persistent/CompletionPersistentTaskAction.java b/server/src/main/java/org/elasticsearch/persistent/CompletionPersistentTaskAction.java index ef216961489f9..46913c951ca29 100644 --- a/server/src/main/java/org/elasticsearch/persistent/CompletionPersistentTaskAction.java +++ b/server/src/main/java/org/elasticsearch/persistent/CompletionPersistentTaskAction.java @@ -20,7 +20,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.action.support.master.MasterNodeRequest; @@ -47,18 +47,13 @@ * ActionType that is used by executor node to indicate that the persistent action finished or failed on the node and needs to be * removed from the cluster state in case of successful completion or restarted on some other node in case of failure. 
*/ -public class CompletionPersistentTaskAction extends StreamableResponseActionType<PersistentTaskResponse> { +public class CompletionPersistentTaskAction extends ActionType<PersistentTaskResponse> { public static final CompletionPersistentTaskAction INSTANCE = new CompletionPersistentTaskAction(); public static final String NAME = "cluster:admin/persistent/completion"; private CompletionPersistentTaskAction() { - super(NAME); - } - - @Override - public PersistentTaskResponse newResponse() { - return new PersistentTaskResponse(); + super(NAME, PersistentTaskResponse::new); } public static class Request extends MasterNodeRequest<Request> { @@ -69,8 +64,13 @@ public static class Request extends MasterNodeRequest<Request> { private long allocationId = -1; - public Request() { + public Request() {} + public Request(StreamInput in) throws IOException { + super(in); + taskId = in.readString(); + allocationId = in.readLong(); + exception = in.readException(); } public Request(String taskId, long allocationId, Exception exception) { @@ -79,14 +79,6 @@ public Request(String taskId, long allocationId, Exception exception) { this.allocationId = allocationId; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - taskId = in.readString(); - allocationId = in.readLong(); - exception = in.readException(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -141,7 +133,7 @@ public TransportAction(TransportService transportService, ClusterService cluster PersistentTasksClusterService persistentTasksClusterService, IndexNameExpressionResolver indexNameExpressionResolver) { super(CompletionPersistentTaskAction.NAME, transportService, clusterService, threadPool, actionFilters, - indexNameExpressionResolver, Request::new); + Request::new, indexNameExpressionResolver); this.persistentTasksClusterService = persistentTasksClusterService; } @@ -151,8 +143,8 @@ protected String executor() { } @Override - protected PersistentTaskResponse newResponse() { - return new PersistentTaskResponse(); + protected PersistentTaskResponse read(StreamInput in) throws IOException { + return new PersistentTaskResponse(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTaskResponse.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTaskResponse.java index ed8550f4a3e3a..1f8125322d554 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTaskResponse.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTaskResponse.java @@ -32,20 +32,15 @@ public class PersistentTaskResponse extends ActionResponse { private PersistentTask<?> task; - public PersistentTaskResponse() { - super(); + public PersistentTaskResponse(StreamInput in) throws IOException { + super(in); + task = in.readOptionalWriteable(PersistentTask::new); } public PersistentTaskResponse(PersistentTask<?> task) { this.task = task; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - task = in.readOptionalWriteable(PersistentTask::new); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(task); diff --git a/server/src/main/java/org/elasticsearch/persistent/RemovePersistentTaskAction.java b/server/src/main/java/org/elasticsearch/persistent/RemovePersistentTaskAction.java index 8002be6626991..f2ce3b7e8ee05 100644 ---
b/server/src/main/java/org/elasticsearch/persistent/RemovePersistentTaskAction.java @@ -20,7 +20,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.action.support.master.MasterNodeRequest; @@ -41,26 +41,24 @@ import java.io.IOException; import java.util.Objects; -public class RemovePersistentTaskAction extends StreamableResponseActionType<PersistentTaskResponse> { +public class RemovePersistentTaskAction extends ActionType<PersistentTaskResponse> { public static final RemovePersistentTaskAction INSTANCE = new RemovePersistentTaskAction(); public static final String NAME = "cluster:admin/persistent/remove"; private RemovePersistentTaskAction() { - super(NAME); - } - - @Override - public PersistentTaskResponse newResponse() { - return new PersistentTaskResponse(); + super(NAME, PersistentTaskResponse::new); } public static class Request extends MasterNodeRequest<Request> { private String taskId; - public Request() { + public Request() {} + public Request(StreamInput in) throws IOException { + super(in); + taskId = in.readString(); } public Request(String taskId) { @@ -71,12 +69,6 @@ public void setTaskId(String taskId) { this.taskId = taskId; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - taskId = in.readString(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -126,7 +118,7 @@ public TransportAction(TransportService transportService, ClusterService cluster PersistentTasksClusterService persistentTasksClusterService, IndexNameExpressionResolver indexNameExpressionResolver) { super(RemovePersistentTaskAction.NAME, transportService, clusterService, threadPool, actionFilters, - indexNameExpressionResolver, Request::new); + Request::new, indexNameExpressionResolver); this.persistentTasksClusterService = persistentTasksClusterService; } @@ -136,8 +128,8 @@ protected String executor() { } @Override - protected PersistentTaskResponse newResponse() { - return new PersistentTaskResponse(); + protected PersistentTaskResponse read(StreamInput in) throws IOException { + return new PersistentTaskResponse(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/persistent/StartPersistentTaskAction.java b/server/src/main/java/org/elasticsearch/persistent/StartPersistentTaskAction.java index 01fa8aa5edb1b..b23a8e9c39223 100644 --- a/server/src/main/java/org/elasticsearch/persistent/StartPersistentTaskAction.java +++ b/server/src/main/java/org/elasticsearch/persistent/StartPersistentTaskAction.java @@ -20,7 +20,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.action.support.master.MasterNodeRequest; @@ -47,18 +47,13 @@ /** * This action can be used to add the record for the persistent action to the cluster state.
*/ -public class StartPersistentTaskAction extends StreamableResponseActionType<PersistentTaskResponse> { +public class StartPersistentTaskAction extends ActionType<PersistentTaskResponse> { public static final StartPersistentTaskAction INSTANCE = new StartPersistentTaskAction(); public static final String NAME = "cluster:admin/persistent/start"; private StartPersistentTaskAction() { - super(NAME); - } - - @Override - public PersistentTaskResponse newResponse() { - return new PersistentTaskResponse(); + super(NAME, PersistentTaskResponse::new); } public static class Request extends MasterNodeRequest<Request> { @@ -69,8 +64,13 @@ public static class Request extends MasterNodeRequest<Request> { private PersistentTaskParams params; - public Request() { + public Request() {} + public Request(StreamInput in) throws IOException { + super(in); + taskId = in.readString(); + taskName = in.readString(); + params = in.readNamedWriteable(PersistentTaskParams.class); } public Request(String taskId, String taskName, PersistentTaskParams params) { @@ -79,14 +79,6 @@ public Request(String taskId, String taskName, PersistentTaskParams params) { this.params = params; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - taskId = in.readString(); - taskName = in.readString(); - params = in.readNamedWriteable(PersistentTaskParams.class); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -190,7 +182,7 @@ public TransportAction(TransportService transportService, ClusterService cluster PersistentTasksService persistentTasksService, IndexNameExpressionResolver indexNameExpressionResolver) { super(StartPersistentTaskAction.NAME, transportService, clusterService, threadPool, actionFilters, - indexNameExpressionResolver, Request::new); + Request::new, indexNameExpressionResolver); this.persistentTasksClusterService = persistentTasksClusterService; NodePersistentTasksExecutor executor = new NodePersistentTasksExecutor(threadPool); clusterService.addListener(new PersistentTasksNodeService(persistentTasksService, persistentTasksExecutorRegistry, @@ -203,8 +195,8 @@ protected String executor() { } @Override - protected PersistentTaskResponse newResponse() { - return new PersistentTaskResponse(); + protected PersistentTaskResponse read(StreamInput in) throws IOException { + return new PersistentTaskResponse(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/persistent/UpdatePersistentTaskStatusAction.java b/server/src/main/java/org/elasticsearch/persistent/UpdatePersistentTaskStatusAction.java index e3514406d7080..3e4898beb8a67 100644 --- a/server/src/main/java/org/elasticsearch/persistent/UpdatePersistentTaskStatusAction.java +++ b/server/src/main/java/org/elasticsearch/persistent/UpdatePersistentTaskStatusAction.java @@ -20,7 +20,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.action.support.master.MasterNodeRequest; @@ -43,18 +43,13 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; -public class UpdatePersistentTaskStatusAction extends StreamableResponseActionType<PersistentTaskResponse> { +public class UpdatePersistentTaskStatusAction extends ActionType<PersistentTaskResponse> { public static final UpdatePersistentTaskStatusAction INSTANCE = new
UpdatePersistentTaskStatusAction(); public static final String NAME = "cluster:admin/persistent/update_status"; private UpdatePersistentTaskStatusAction() { - super(NAME); - } - - @Override - public PersistentTaskResponse newResponse() { - return new PersistentTaskResponse(); + super(NAME, PersistentTaskResponse::new); } public static class Request extends MasterNodeRequest<Request> { @@ -63,7 +58,13 @@ public static class Request extends MasterNodeRequest<Request> { private long allocationId = -1L; private PersistentTaskState state; - public Request() { + public Request() {} + + public Request(StreamInput in) throws IOException { + super(in); + taskId = in.readString(); + allocationId = in.readLong(); + state = in.readOptionalNamedWriteable(PersistentTaskState.class); } public Request(String taskId, long allocationId, PersistentTaskState state) { @@ -84,14 +85,6 @@ public void setState(PersistentTaskState state) { this.state = state; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - taskId = in.readString(); - allocationId = in.readLong(); - state = in.readOptionalNamedWriteable(PersistentTaskState.class); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -156,7 +149,7 @@ public TransportAction(TransportService transportService, ClusterService cluster PersistentTasksClusterService persistentTasksClusterService, IndexNameExpressionResolver indexNameExpressionResolver) { super(UpdatePersistentTaskStatusAction.NAME, transportService, clusterService, threadPool, actionFilters, - indexNameExpressionResolver, Request::new); + Request::new, indexNameExpressionResolver); this.persistentTasksClusterService = persistentTasksClusterService; } @@ -166,8 +159,8 @@ protected String executor() { } @Override - protected PersistentTaskResponse newResponse() { - return new PersistentTaskResponse(); + protected PersistentTaskResponse read(StreamInput in) throws IOException { + return new PersistentTaskResponse(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java b/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java index 2c2313286ef20..27472c6b5bd07 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java @@ -107,13 +107,6 @@ public Collection<SnapshotId> getSnapshotIds() { return Collections.unmodifiableCollection(snapshotIds.values()); } - /** - * Returns an immutable collection of all the snapshot ids in the repository. - */ - public Collection<SnapshotId> getAllSnapshotIds() { - return List.copyOf(snapshotIds.values()); - } - /** * Returns the {@link SnapshotState} for the given snapshot. Returns {@code null} if * there is no state for the snapshot.
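The persistent-task actions above repeat the pattern used throughout this diff: the no-arg constructor plus a mutable readFrom(StreamInput) override is replaced by a constructor that deserializes directly from the stream, which lets the class be registered as a Writeable.Reader method reference (Request::new). A minimal sketch of the pattern, using a hypothetical ExampleRequest for illustration only:

    import java.io.IOException;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.transport.TransportRequest;

    public class ExampleRequest extends TransportRequest { // hypothetical class, not part of this change
        private final String taskId; // fields may become final once assigned in the constructor

        public ExampleRequest(StreamInput in) throws IOException {
            super(in); // replaces the old super.readFrom(in)
            taskId = in.readString();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            out.writeString(taskId); // must mirror the read order in the constructor
        }
    }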
diff --git a/server/src/main/java/org/elasticsearch/repositories/VerificationFailure.java b/server/src/main/java/org/elasticsearch/repositories/VerificationFailure.java index c260bfa1d20ba..a465b663cd18d 100644 --- a/server/src/main/java/org/elasticsearch/repositories/VerificationFailure.java +++ b/server/src/main/java/org/elasticsearch/repositories/VerificationFailure.java @@ -19,13 +19,7 @@ package org.elasticsearch.repositories; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; - -import java.io.IOException; - -public class VerificationFailure implements Streamable { +public class VerificationFailure { private String nodeId; @@ -36,18 +30,6 @@ public class VerificationFailure implements Streamable { this.cause = cause; } - @Override - public void readFrom(StreamInput in) throws IOException { - nodeId = in.readOptionalString(); - cause = in.readException(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeOptionalString(nodeId); - out.writeException(cause); - } - @Override public String toString() { return "[" + nodeId + ", '" + cause + "']"; diff --git a/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java b/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java index 24a5d3b561dda..98a5455244905 100644 --- a/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java @@ -63,7 +63,7 @@ public VerifyNodeRepositoryAction(TransportService transportService, ClusterServ this.transportService = transportService; this.clusterService = clusterService; this.repositoriesService = repositoriesService; - transportService.registerRequestHandler(ACTION_NAME, VerifyNodeRepositoryRequest::new, ThreadPool.Names.SNAPSHOT, + transportService.registerRequestHandler(ACTION_NAME, ThreadPool.Names.SNAPSHOT, VerifyNodeRepositoryRequest::new, new VerifyNodeRepositoryRequestHandler()); } @@ -131,7 +131,10 @@ public static class VerifyNodeRepositoryRequest extends TransportRequest { private String repository; private String verificationToken; - public VerifyNodeRepositoryRequest() { + public VerifyNodeRepositoryRequest(StreamInput in) throws IOException { + super(in); + repository = in.readString(); + verificationToken = in.readString(); } VerifyNodeRepositoryRequest(String repository, String verificationToken) { @@ -139,13 +142,6 @@ public VerifyNodeRepositoryRequest() { this.verificationToken = verificationToken; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - repository = in.readString(); - verificationToken = in.readString(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 38b5aaba3dee9..14e69f4c98f77 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -129,13 +129,13 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp private static final int BUFFER_SIZE = 4096; - private static final String SNAPSHOT_PREFIX = "snap-"; + 
public static final String SNAPSHOT_PREFIX = "snap-"; - private static final String SNAPSHOT_CODEC = "snapshot"; + public static final String SNAPSHOT_CODEC = "snapshot"; - private static final String INDEX_FILE_PREFIX = "index-"; + public static final String INDEX_FILE_PREFIX = "index-"; - private static final String INDEX_LATEST_BLOB = "index.latest"; + public static final String INDEX_LATEST_BLOB = "index.latest"; private static final String TESTS_FILE = "tests-"; @@ -180,7 +180,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp private final ChecksumBlobStoreFormat indexMetaDataFormat; - private final ChecksumBlobStoreFormat snapshotFormat; + protected final ChecksumBlobStoreFormat snapshotFormat; private final boolean readOnly; @@ -362,7 +362,7 @@ public void initializeSnapshot(SnapshotId snapshotId, List indices, Met final String snapshotName = snapshotId.getName(); // check if the snapshot name already exists in the repository final RepositoryData repositoryData = getRepositoryData(); - if (repositoryData.getAllSnapshotIds().stream().anyMatch(s -> s.getName().equals(snapshotName))) { + if (repositoryData.getSnapshotIds().stream().anyMatch(s -> s.getName().equals(snapshotName))) { throw new InvalidSnapshotNameException(metadata.name(), snapshotId.getName(), "snapshot with the same name already exists"); } @@ -418,6 +418,7 @@ public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId, Action } final var survivingIndices = updatedRepositoryData.getIndices(); deleteIndices( + updatedRepositoryData, Optional.ofNullable(finalSnapshotInfo) .map(info -> info.indices().stream().filter(survivingIndices::containsKey) .map(updatedRepositoryData::resolveIndexId).collect(Collectors.toList())) @@ -434,7 +435,7 @@ public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId, Action private void cleanupStaleRootFiles(Set rootBlobNames, RepositoryData repositoryData) { final Set allSnapshotIds = - repositoryData.getAllSnapshotIds().stream().map(SnapshotId::getUUID).collect(Collectors.toSet()); + repositoryData.getSnapshotIds().stream().map(SnapshotId::getUUID).collect(Collectors.toSet()); final List blobsToDelete = rootBlobNames.stream().filter( blob -> { if (FsBlobContainer.isTempBlobName(blob)) { @@ -502,7 +503,8 @@ private void cleanupStaleIndices(Map foundIndices, Map indices, SnapshotId snapshotId, ActionListener listener) { + private void deleteIndices(RepositoryData repositoryData, List indices, SnapshotId snapshotId, + ActionListener listener) { if (indices.isEmpty()) { listener.onResponse(null); return; @@ -524,8 +526,8 @@ protected void doRun() { if (indexMetaData != null) { for (int shardId = 0; shardId < indexMetaData.getNumberOfShards(); shardId++) { try { - deleteShardSnapshot(indexId, new ShardId(indexMetaData.getIndex(), shardId), snapshotId); - } catch (SnapshotException ex) { + deleteShardSnapshot(repositoryData, indexId, new ShardId(indexMetaData.getIndex(), shardId), snapshotId); + } catch (Exception ex) { final int finalShardId = shardId; logger.warn(() -> new ParameterizedMessage("[{}] failed to delete shard data for shard [{}][{}]", snapshotId, indexId.getName(), finalShardId), ex); @@ -951,9 +953,29 @@ public void snapshotShard(Store store, MapperService mapperService, SnapshotId s for (SnapshotFiles point : snapshots) { newSnapshotsList.add(point); } - // finalize the snapshot and rewrite the snapshot index with the next sequential snapshot index - finalizeShard(newSnapshotsList, fileListGeneration, blobs, "snapshot 
creation [" + snapshotId + "]", shardContainer, - shardId, snapshotId); + final String indexGeneration = Long.toString(fileListGeneration + 1); + final List blobsToDelete; + try { + final BlobStoreIndexShardSnapshots updatedSnapshots = new BlobStoreIndexShardSnapshots(newSnapshotsList); + indexShardSnapshotsFormat.writeAtomic(updatedSnapshots, shardContainer, indexGeneration); + // Delete all previous index-N blobs + blobsToDelete = + blobs.keySet().stream().filter(blob -> blob.startsWith(SNAPSHOT_INDEX_PREFIX)).collect(Collectors.toList()); + assert blobsToDelete.stream().mapToLong(b -> Long.parseLong(b.replaceFirst(SNAPSHOT_INDEX_PREFIX, ""))) + .max().orElse(-1L) < Long.parseLong(indexGeneration) + : "Tried to delete an index-N blob newer than the current generation [" + indexGeneration + "] when deleting index-N" + + " blobs " + blobsToDelete; + } catch (IOException e) { + throw new IndexShardSnapshotFailedException(shardId, + "Failed to finalize snapshot creation [" + snapshotId + "] with shard index [" + + indexShardSnapshotsFormat.blobName(indexGeneration) + "]", e); + } + try { + shardContainer.deleteBlobsIgnoringIfNotExists(blobsToDelete); + } catch (IOException e) { + logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to delete old index-N blobs during finalization", + snapshotId, shardId), e); + } snapshotStatus.moveToDone(threadPool.absoluteTimeInMillis()); } catch (Exception e) { snapshotStatus.moveToFailed(threadPool.absoluteTimeInMillis(), ExceptionsHelper.detailedMessage(e)); @@ -1011,7 +1033,7 @@ public void verify(String seed, DiscoveryNode localNode) { } } else { BlobContainer testBlobContainer = blobStore().blobContainer(basePath().add(testBlobPrefix(seed))); - if (testBlobContainer.blobExists("master.dat")) { + try (InputStream ignored = testBlobContainer.readBlob("master.dat")) { try { BytesArray bytes = new BytesArray(seed); try (InputStream stream = bytes.streamInput()) { @@ -1021,11 +1043,13 @@ public void verify(String seed, DiscoveryNode localNode) { throw new RepositoryVerificationException(metadata.name(), "store location [" + blobStore() + "] is not accessible on the node [" + localNode + "]", exp); } - } else { + } catch (NoSuchFileException e) { throw new RepositoryVerificationException(metadata.name(), "a file written by master to the store [" + blobStore() + "] cannot be accessed on the node [" + localNode + "]. 
" + "This might indicate that the store [" + blobStore() + "] is not shared between this node and the master node or " + - "that permissions on the store don't allow reading files written by the master node"); + "that permissions on the store don't allow reading files written by the master node", e); + } catch (IOException e) { + throw new RepositoryVerificationException(metadata.name(), "Failed to verify repository", e); } } } @@ -1041,7 +1065,7 @@ public String toString() { /** * Delete shard snapshot */ - private void deleteShardSnapshot(IndexId indexId, ShardId snapshotShardId, SnapshotId snapshotId) { + private void deleteShardSnapshot(RepositoryData repositoryData, IndexId indexId, ShardId snapshotShardId, SnapshotId snapshotId) { final BlobContainer shardContainer = shardContainer(indexId, snapshotShardId); final Map blobs; try { @@ -1054,74 +1078,57 @@ private void deleteShardSnapshot(IndexId indexId, ShardId snapshotShardId, Snaps BlobStoreIndexShardSnapshots snapshots = tuple.v1(); long fileListGeneration = tuple.v2(); - try { - indexShardSnapshotFormat.delete(shardContainer, snapshotId.getUUID()); - } catch (IOException e) { - logger.warn(new ParameterizedMessage("[{}] [{}] failed to delete shard snapshot file", snapshotShardId, snapshotId), e); - } - // Build a list of snapshots that should be preserved List newSnapshotsList = new ArrayList<>(); + final Set survivingSnapshotNames = + repositoryData.getSnapshots(indexId).stream().map(SnapshotId::getName).collect(Collectors.toSet()); for (SnapshotFiles point : snapshots) { - if (!point.snapshot().equals(snapshotId.getName())) { + if (survivingSnapshotNames.contains(point.snapshot())) { newSnapshotsList.add(point); } } - // finalize the snapshot and rewrite the snapshot index with the next sequential snapshot index - finalizeShard(newSnapshotsList, fileListGeneration, blobs, "snapshot deletion [" + snapshotId + "]", shardContainer, - snapshotShardId, snapshotId); - } - - /** - * Loads information about shard snapshot - */ - private BlobStoreIndexShardSnapshot loadShardSnapshot(BlobContainer shardContainer, SnapshotId snapshotId) { - try { - return indexShardSnapshotFormat.read(shardContainer, snapshotId.getUUID()); - } catch (IOException ex) { - throw new SnapshotException(metadata.name(), snapshotId, - "failed to read shard snapshot file for [" + shardContainer.path() + ']', ex); - } - } - - /** - * Writes a new index file for the shard and removes all unreferenced files from the repository. - * - * We need to be really careful in handling index files in case of failures to make sure we don't - * have index file that points to files that were deleted. 
- * - * @param snapshots list of active snapshots in the container - * @param fileListGeneration the generation number of the current snapshot index file - * @param blobs list of blobs in the container - * @param reason a reason explaining why the shard index file is written - */ - private void finalizeShard(List<SnapshotFiles> snapshots, long fileListGeneration, Map<String, BlobMetaData> blobs, - String reason, BlobContainer shardContainer, ShardId shardId, SnapshotId snapshotId) { final String indexGeneration = Long.toString(fileListGeneration + 1); try { final List<String> blobsToDelete; - if (snapshots.isEmpty()) { + if (newSnapshotsList.isEmpty()) { // If we deleted all snapshots, we don't need to create a new index file and simply delete all the blobs we found blobsToDelete = List.copyOf(blobs.keySet()); } else { - final BlobStoreIndexShardSnapshots updatedSnapshots = new BlobStoreIndexShardSnapshots(snapshots); + final Set<String> survivingSnapshotUUIDs = repositoryData.getSnapshots(indexId).stream().map(SnapshotId::getUUID) + .collect(Collectors.toSet()); + final BlobStoreIndexShardSnapshots updatedSnapshots = new BlobStoreIndexShardSnapshots(newSnapshotsList); indexShardSnapshotsFormat.writeAtomic(updatedSnapshots, shardContainer, indexGeneration); - // Delete all previous index-N, data-blobs that are not referenced by the new index-N and temporary blobs + // Delete all previous index-N, data- and meta-blobs that are not referenced by the new index-N and temporary blobs blobsToDelete = blobs.keySet().stream().filter(blob -> blob.startsWith(SNAPSHOT_INDEX_PREFIX) - || blob.startsWith(DATA_BLOB_PREFIX) && updatedSnapshots.findNameFile(canonicalName(blob)) == null + || (blob.startsWith(SNAPSHOT_PREFIX) && blob.endsWith(".dat") + && survivingSnapshotUUIDs.contains( + blob.substring(SNAPSHOT_PREFIX.length(), blob.length() - ".dat".length())) == false) + || (blob.startsWith(DATA_BLOB_PREFIX) && updatedSnapshots.findNameFile(canonicalName(blob)) == null) || FsBlobContainer.isTempBlobName(blob)).collect(Collectors.toList()); } try { shardContainer.deleteBlobsIgnoringIfNotExists(blobsToDelete); } catch (IOException e) { logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to delete blobs during finalization", - snapshotId, shardId), e); + snapshotId, snapshotShardId), e); } } catch (IOException e) { - String message = - "Failed to finalize " + reason + " with shard index [" + indexShardSnapshotsFormat.blobName(indexGeneration) + "]"; - throw new IndexShardSnapshotFailedException(shardId, message, e); + throw new IndexShardSnapshotFailedException(snapshotShardId, + "Failed to finalize snapshot deletion [" + snapshotId + "] with shard index [" + + indexShardSnapshotsFormat.blobName(indexGeneration) + "]", e); + } + } + + /** + * Loads information about shard snapshot + */ + private BlobStoreIndexShardSnapshot loadShardSnapshot(BlobContainer shardContainer, SnapshotId snapshotId) { + try { + return indexShardSnapshotFormat.read(shardContainer, snapshotId.getUUID()); + } catch (IOException ex) { + throw new SnapshotException(metadata.name(), snapshotId, + "failed to read shard snapshot file for [" + shardContainer.path() + ']', ex); } } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java index 0f059a89e6c8c..bd2b4900ece71 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java +++
b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java @@ -72,15 +72,11 @@ public final class ChecksumBlobStoreFormat<T extends ToXContent> { SNAPSHOT_ONLY_FORMAT_PARAMS = new ToXContent.MapParams(snapshotOnlyParams); } - private static final XContentType DEFAULT_X_CONTENT_TYPE = XContentType.SMILE; - // The format version public static final int VERSION = 1; private static final int BUFFER_SIZE = 4096; - private final XContentType xContentType; - private final boolean compress; private final String codec; @@ -96,29 +92,16 @@ public final class ChecksumBlobStoreFormat<T extends ToXContent> { * @param blobNameFormat format of the blobname in {@link String#format} format * @param reader prototype object that can deserialize T from XContent * @param compress true if the content should be compressed - * @param xContentType content type that should be used for write operations */ public ChecksumBlobStoreFormat(String codec, String blobNameFormat, CheckedFunction<XContentParser, T, IOException> reader, - NamedXContentRegistry namedXContentRegistry, boolean compress, XContentType xContentType) { + NamedXContentRegistry namedXContentRegistry, boolean compress) { this.reader = reader; this.blobNameFormat = blobNameFormat; this.namedXContentRegistry = namedXContentRegistry; - this.xContentType = xContentType; this.compress = compress; this.codec = codec; } - /** - * @param codec codec name - * @param blobNameFormat format of the blobname in {@link String#format} format - * @param reader prototype object that can deserialize T from XContent - * @param compress true if the content should be compressed - */ - public ChecksumBlobStoreFormat(String codec, String blobNameFormat, CheckedFunction<XContentParser, T, IOException> reader, - NamedXContentRegistry namedXContentRegistry, boolean compress) { - this(codec, blobNameFormat, reader, namedXContentRegistry, compress, DEFAULT_X_CONTENT_TYPE); - } - /** * Reads and parses the blob with the given name, applying name translation using the {@link #blobName} method * @@ -158,7 +141,7 @@ public T readBlob(BlobContainer blobContainer, String blobName) throws IOException { long filePointer = indexInput.getFilePointer(); long contentSize = indexInput.length() - CodecUtil.footerLength() - filePointer; try (XContentParser parser = XContentHelper.createParser(namedXContentRegistry, LoggingDeprecationHandler.INSTANCE, - bytes.slice((int) filePointer, (int) contentSize))) { + bytes.slice((int) filePointer, (int) contentSize), XContentType.SMILE)) { return reader.apply(parser); } } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) { @@ -237,7 +220,7 @@ public void close() { } private void write(T obj, StreamOutput streamOutput) throws IOException { - try (XContentBuilder builder = XContentFactory.contentBuilder(xContentType, streamOutput)) { + try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.SMILE, streamOutput)) { builder.startObject(); obj.toXContent(builder, SNAPSHOT_ONLY_FORMAT_PARAMS); builder.endObject(); diff --git a/server/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java b/server/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java index 4e3d652ec5d7e..467f1d969e8be 100644 --- a/server/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java +++ b/server/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java @@ -84,9 +84,22 @@ public XContentBuilder newErrorBuilder() throws IOException { */ @Override public XContentBuilder newBuilder(@Nullable XContentType requestContentType, boolean useFiltering) throws IOException { + return
newBuilder(requestContentType, null, useFiltering); + } + + /** + * Creates a new {@link XContentBuilder} for a response to be sent using this channel. The builder's type can be passed explicitly + * through {@code responseContentType}, or it falls back to the {@link #newBuilder(XContentType, boolean)} logic if the passed type + * is {@code null}. + */ + @Override + public XContentBuilder newBuilder(@Nullable XContentType requestContentType, @Nullable XContentType responseContentType, + boolean useFiltering) throws IOException { + if (responseContentType == null) { + responseContentType = XContentType.fromMediaTypeOrFormat(format); + } // try to determine the response content type from the media type or the format query string parameter, with the format parameter // taking precedence over the Accept header - XContentType responseContentType = XContentType.fromMediaTypeOrFormat(format); if (responseContentType == null) { if (requestContentType != null) { // if there was a parsed content-type for the incoming request use that since no format was specified using the query diff --git a/server/src/main/java/org/elasticsearch/rest/RestChannel.java b/server/src/main/java/org/elasticsearch/rest/RestChannel.java index 8c8346f0ef4b2..ab4b1e710c12f 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestChannel.java +++ b/server/src/main/java/org/elasticsearch/rest/RestChannel.java @@ -36,6 +36,9 @@ public interface RestChannel { XContentBuilder newErrorBuilder() throws IOException; XContentBuilder newBuilder(@Nullable XContentType xContentType, boolean useFiltering) throws IOException; + + XContentBuilder newBuilder(@Nullable XContentType xContentType, @Nullable XContentType responseContentType, + boolean useFiltering) throws IOException; BytesStreamOutput bytesOutput(); diff --git a/server/src/main/java/org/elasticsearch/rest/RestController.java b/server/src/main/java/org/elasticsearch/rest/RestController.java index 3f32d281918a3..5e1beb84ce5d0 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestController.java +++ b/server/src/main/java/org/elasticsearch/rest/RestController.java @@ -508,6 +508,12 @@ public XContentBuilder newBuilder(@Nullable XContentType xContentType, boolean useFiltering) return delegate.newBuilder(xContentType, useFiltering); } + @Override + public XContentBuilder newBuilder(XContentType xContentType, XContentType responseContentType, boolean useFiltering) + throws IOException { + return delegate.newBuilder(xContentType, responseContentType, useFiltering); + } + @Override public BytesStreamOutput bytesOutput() { return delegate.bytesOutput(); } diff --git a/server/src/main/java/org/elasticsearch/rest/RestUtils.java b/server/src/main/java/org/elasticsearch/rest/RestUtils.java index e2be316cf92bf..827174743f722 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestUtils.java +++ b/server/src/main/java/org/elasticsearch/rest/RestUtils.java @@ -19,6 +19,7 @@ package org.elasticsearch.rest; +import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; import org.elasticsearch.common.path.PathTrie; @@ -30,6 +31,11 @@ public class RestUtils { + /** + * Sets whether we decode a '+' in a URL as a space or not.
+ */ + private static final boolean DECODE_PLUS_AS_SPACE = Booleans.parseBoolean(System.getProperty("es.rest.url_plus_as_space", "false")); + public static final PathTrie.Decoder REST_DECODER = new PathTrie.Decoder() { @Override public String decode(String value) { @@ -55,7 +61,7 @@ public static void decodeQueryString(String s, int fromIndex, Map<String, String> params) { - buf[pos++] = (byte) ' '; // "+" -> " " + buf[pos++] = (byte) (plusAsSpace ? ' ' : '+'); // "+" -> " " break; case '%': if (i == size - 1) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java index 2251615d678fb..aadf6e18fae26 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java @@ -23,8 +23,11 @@ import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequestBuilder; import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsResponse; import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; @@ -41,6 +44,14 @@ public final class RestReloadSecureSettingsAction extends BaseRestHandler { + static final ObjectParser PARSER = + new ObjectParser<>("reload_secure_settings", NodesReloadSecureSettingsRequest::new); + + static { + PARSER.declareString((request, value) -> request.setSecureStorePassword(new SecureString(value.toCharArray())), + new ParseField("secure_settings_password")); + } + public RestReloadSecureSettingsAction(Settings settings, RestController controller) { super(settings); controller.registerHandler(POST, "/_nodes/reload_secure_settings", this); @@ -56,23 +67,28 @@ public String getName() { public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { final String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId")); final NodesReloadSecureSettingsRequestBuilder nodesRequestBuilder = client.admin() - .cluster() - .prepareReloadSecureSettings() - .setTimeout(request.param("timeout")) - .setNodesIds(nodesIds); - final NodesReloadSecureSettingsRequest nodesRequest = nodesRequestBuilder.request(); + .cluster() + .prepareReloadSecureSettings() + .setTimeout(request.param("timeout")) + .setNodesIds(nodesIds); + request.withContentOrSourceParamParserOrNull(parser -> { + if (parser != null) { + final NodesReloadSecureSettingsRequest nodesRequest = PARSER.parse(parser, null); + nodesRequestBuilder.setSecureStorePassword(nodesRequest.getSecureSettingsPassword()); + } + }); + return channel -> nodesRequestBuilder .execute(new RestBuilderListener<NodesReloadSecureSettingsResponse>(channel) { @Override public RestResponse buildResponse(NodesReloadSecureSettingsResponse response, XContentBuilder builder) - throws Exception { + throws Exception { builder.startObject(); - { - RestActions.buildNodesHeader(builder, channel.request(), response); - builder.field("cluster_name", response.getClusterName().value()); - response.toXContent(builder, channel.request()); - } +
RestActions.buildNodesHeader(builder, channel.request(), response); + builder.field("cluster_name", response.getClusterName().value()); + response.toXContent(builder, channel.request()); builder.endObject(); + nodesRequestBuilder.request().closePassword(); return new BytesRestResponse(RestStatus.OK, builder); } }); diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java index 804fa61fc53b2..e89d257aefbca 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.rest.action.document; import org.apache.logging.log4j.LogManager; +import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.update.UpdateRequest; @@ -83,6 +84,12 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC } updateRequest.retryOnConflict(request.paramAsInt("retry_on_conflict", updateRequest.retryOnConflict())); + if (request.hasParam("version") || request.hasParam("version_type")) { + final ActionRequestValidationException versioningError = new ActionRequestValidationException(); + versioningError.addValidationError("internal versioning can not be used for optimistic concurrency control. " + + "Please use `if_seq_no` and `if_primary_term` instead"); + throw versioningError; + } updateRequest.setIfSeqNo(request.paramAsLong("if_seq_no", updateRequest.ifSeqNo())); updateRequest.setIfPrimaryTerm(request.paramAsLong("if_primary_term", updateRequest.ifPrimaryTerm())); diff --git a/server/src/main/java/org/elasticsearch/script/Script.java b/server/src/main/java/org/elasticsearch/script/Script.java index 67ea4f24b83f8..bd74ed806cd74 100644 --- a/server/src/main/java/org/elasticsearch/script/Script.java +++ b/server/src/main/java/org/elasticsearch/script/Script.java @@ -53,7 +53,8 @@ * compile and execute a script from the {@link ScriptService} * based on the {@link ScriptType}. * - * There are three types of scripts specified by {@link ScriptType}. + * There are two types of scripts specified by {@link ScriptType}, + * INLINE, and STORED. * * The following describes the expected parameters for each type of script: * diff --git a/server/src/main/java/org/elasticsearch/search/SearchHit.java b/server/src/main/java/org/elasticsearch/search/SearchHit.java index 27e735789f765..bbb19b83cd9f9 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchHit.java +++ b/server/src/main/java/org/elasticsearch/search/SearchHit.java @@ -71,7 +71,6 @@ import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureFieldName; import static org.elasticsearch.common.xcontent.XContentParserUtils.parseFieldsValue; -import static org.elasticsearch.search.fetch.subphase.highlight.HighlightField.readHighlightField; /** * A single search hit. 
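The ObjectParser registered in RestReloadSecureSettingsAction above is the declarative replacement for hand-written XContent token loops: each declare* call binds one accepted field to one setter. A minimal sketch of the same pattern, assuming an invented ExampleReloadRequest class (only the secure_settings_password field name comes from this PR):

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;

// Illustrative only: stands in for the real request class.
class ExampleReloadRequest {
    String password;

    void setPassword(String password) {
        this.password = password;
    }
}

class ExampleReloadParser {
    static final ObjectParser<ExampleReloadRequest, Void> PARSER =
        new ObjectParser<>("reload_secure_settings", ExampleReloadRequest::new);

    static {
        // One declaration per accepted field; by default unknown fields fail
        // the parse, replacing a manual currentToken()/currentName() loop.
        PARSER.declareString(ExampleReloadRequest::setPassword, new ParseField("secure_settings_password"));
    }

    static ExampleReloadRequest fromXContent(XContentParser parser) throws IOException {
        return PARSER.parse(parser, null);
    }
}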
@@ -160,12 +159,12 @@ public SearchHit(StreamInput in) throws IOException { if (size == 0) { fields = emptyMap(); } else if (size == 1) { - DocumentField hitField = DocumentField.readDocumentField(in); + DocumentField hitField = new DocumentField(in); fields = singletonMap(hitField.getName(), hitField); } else { Map fields = new HashMap<>(); for (int i = 0; i < size; i++) { - DocumentField hitField = DocumentField.readDocumentField(in); + DocumentField hitField = new DocumentField(in); fields.put(hitField.getName(), hitField); } this.fields = unmodifiableMap(fields); @@ -175,12 +174,12 @@ public SearchHit(StreamInput in) throws IOException { if (size == 0) { highlightFields = emptyMap(); } else if (size == 1) { - HighlightField field = readHighlightField(in); + HighlightField field = new HighlightField(in); highlightFields = singletonMap(field.name(), field); } else { Map highlightFields = new HashMap<>(); for (int i = 0; i < size; i++) { - HighlightField field = readHighlightField(in); + HighlightField field = new HighlightField(in); highlightFields.put(field.name(), field); } this.highlightFields = unmodifiableMap(highlightFields); diff --git a/server/src/main/java/org/elasticsearch/search/SearchPhaseResult.java b/server/src/main/java/org/elasticsearch/search/SearchPhaseResult.java index 2a81aac5117e1..77af879831e02 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchPhaseResult.java +++ b/server/src/main/java/org/elasticsearch/search/SearchPhaseResult.java @@ -90,11 +90,6 @@ public QuerySearchResult queryResult() { */ public FetchSearchResult fetchResult() { return null; } - @Override - public final void readFrom(StreamInput in) { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { // TODO: this seems wrong, SearchPhaseResult should have a writeTo? 
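SearchHit above reads its fields and highlightFields maps size-first and short-circuits the empty and single-entry cases to emptyMap/singletonMap before falling back to a HashMap. The same idea in plain JDK I/O, as a self-contained sketch (SizedMapReader and the String-to-String payload are invented for illustration):

import java.io.DataInputStream;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

final class SizedMapReader {
    // Reads an int size, then that many key/value string pairs, avoiding a
    // HashMap allocation for the common zero- and one-entry cases.
    static Map<String, String> readMap(DataInputStream in) throws IOException {
        int size = in.readInt();
        if (size == 0) {
            return Collections.emptyMap();
        } else if (size == 1) {
            String key = in.readUTF();
            return Collections.singletonMap(key, in.readUTF());
        }
        Map<String, String> map = new HashMap<>(size);
        for (int i = 0; i < size; i++) {
            map.put(in.readUTF(), in.readUTF());
        }
        return Collections.unmodifiableMap(map);
    }
}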
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java index f10318de02e43..3be59c0c1f7ec 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java @@ -526,6 +526,9 @@ private BucketReduceResult maybeMergeConsecutiveBuckets(BucketReduceResult reduc if (buckets.size() > targetBuckets) { for (int interval : roundingInfo.innerIntervals) { int resultingBuckets = buckets.size() / interval; + if (buckets.size() % interval != 0) { + resultingBuckets++; + } if (resultingBuckets <= targetBuckets) { return mergeConsecutiveBuckets(buckets, interval, roundingIdx, roundingInfo, reduceContext); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketHelpers.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketHelpers.java index 1a863a209828b..ab4540b6d99eb 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketHelpers.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketHelpers.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.search.aggregations.AggregationExecutionException; @@ -52,7 +53,7 @@ public class BucketHelpers { * "insert_zeros": empty buckets will be filled with zeros for all metrics * "skip": empty buckets will simply be ignored */ - public enum GapPolicy { + public enum GapPolicy implements Writeable { INSERT_ZEROS((byte) 0, "insert_zeros"), SKIP((byte) 1, "skip"); /** @@ -95,6 +96,7 @@ public static GapPolicy parse(String text, XContentLocation tokenLocation) { /** * Serialize the GapPolicy to the output stream */ + @Override public void writeTo(StreamOutput out) throws IOException { out.writeByte(id); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptPipelineAggregationBuilder.java index a0dabefed323f..ba2114c1152a3 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptPipelineAggregationBuilder.java @@ -19,9 +19,10 @@ package org.elasticsearch.search.aggregations.pipeline; -import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.script.Script; @@ -29,13 +30,14 @@ import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import java.io.IOException; -import java.util.ArrayList; +import 
java.util.Collections; import java.util.HashMap; -import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Map.Entry; import java.util.Objects; import java.util.TreeMap; +import java.util.function.Function; import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregator.Parser.BUCKETS_PATH; import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregator.Parser.FORMAT; @@ -49,6 +51,34 @@ public class BucketScriptPipelineAggregationBuilder extends AbstractPipelineAggr private String format = null; private GapPolicy gapPolicy = GapPolicy.SKIP; + private static final Function> PARSER + = name -> { + + @SuppressWarnings("unchecked") + ConstructingObjectParser parser = new ConstructingObjectParser<>( + BucketScriptPipelineAggregationBuilder.NAME, + false, + o -> new BucketScriptPipelineAggregationBuilder(name, (Map) o[0], (Script) o[1])); + + parser.declareField(ConstructingObjectParser.constructorArg() + , BucketScriptPipelineAggregationBuilder::extractBucketPath + , BUCKETS_PATH_FIELD + , ObjectParser.ValueType.OBJECT_ARRAY_OR_STRING); + parser.declareField(ConstructingObjectParser.constructorArg(), + (p, c) -> Script.parse(p), Script.SCRIPT_PARSE_FIELD, ObjectParser.ValueType.OBJECT_OR_STRING); + + parser.declareString(BucketScriptPipelineAggregationBuilder::format, FORMAT); + parser.declareField(BucketScriptPipelineAggregationBuilder::gapPolicy, p -> { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return GapPolicy.parse(p.text().toLowerCase(Locale.ROOT), p.getTokenLocation()); + } + throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); + }, GAP_POLICY, ObjectParser.ValueType.STRING); + + return parser; + }; + + public BucketScriptPipelineAggregationBuilder(String name, Map bucketsPathsMap, Script script) { super(name, NAME, new TreeMap<>(bucketsPathsMap).values().toArray(new String[bucketsPathsMap.size()])); this.bucketsPathsMap = bucketsPathsMap; @@ -86,6 +116,27 @@ protected void doWriteTo(StreamOutput out) throws IOException { gapPolicy.writeTo(out); } + private static Map extractBucketPath(XContentParser parser) throws IOException { + XContentParser.Token token = parser.currentToken(); + if (token == XContentParser.Token.VALUE_STRING) { + // input is a string, name of the path set to '_value'. 
+ // This is a bit odd as there is no constructor for it + return Collections.singletonMap("_value", parser.text()); + } else if (token == XContentParser.Token.START_ARRAY) { + // input is an array, name of the path set to '_value' + position + Map<String, String> bucketsPathsMap = new HashMap<>(); + int i = 0; + while ((parser.nextToken()) != XContentParser.Token.END_ARRAY) { + String path = parser.text(); + bucketsPathsMap.put("_value" + i++, path); + } + return bucketsPathsMap; + } else { + // input is an object, it should contain name / value pairs + return parser.mapStrings(); + } + } + private static Map<String, String> convertToBucketsPathMap(String[] bucketsPaths) { Map<String, String> bucketsPathsMap = new HashMap<>(); for (int i = 0; i < bucketsPaths.length; i++) { @@ -154,86 +205,10 @@ protected XContentBuilder internalXContent(XContentBuilder builder, Params params) return builder; } - public static BucketScriptPipelineAggregationBuilder parse(String reducerName, XContentParser parser) throws IOException { - XContentParser.Token token; - Script script = null; - String currentFieldName = null; - Map<String, String> bucketsPathsMap = null; - String format = null; - GapPolicy gapPolicy = null; - - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token == XContentParser.Token.VALUE_STRING) { - if (FORMAT.match(currentFieldName, parser.getDeprecationHandler())) { - format = parser.text(); - } else if (BUCKETS_PATH.match(currentFieldName, parser.getDeprecationHandler())) { - bucketsPathsMap = new HashMap<>(); - bucketsPathsMap.put("_value", parser.text()); - } else if (GAP_POLICY.match(currentFieldName, parser.getDeprecationHandler())) { - gapPolicy = GapPolicy.parse(parser.text(), parser.getTokenLocation()); - } else if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - script = Script.parse(parser); - } else { - throw new ParsingException(parser.getTokenLocation(), - "Unknown key for a " + token + " in [" + reducerName + "]: [" + currentFieldName + "]."); - } - } else if (token == XContentParser.Token.START_ARRAY) { - if (BUCKETS_PATH.match(currentFieldName, parser.getDeprecationHandler())) { - List<String> paths = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - String path = parser.text(); - paths.add(path); - } - bucketsPathsMap = new HashMap<>(); - for (int i = 0; i < paths.size(); i++) { - bucketsPathsMap.put("_value" + i, paths.get(i)); - } - } else { - throw new ParsingException(parser.getTokenLocation(), - "Unknown key for a " + token + " in [" + reducerName + "]: [" + currentFieldName + "]."); - } - } else if (token == XContentParser.Token.START_OBJECT) { - if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - script = Script.parse(parser); - } else if (BUCKETS_PATH.match(currentFieldName, parser.getDeprecationHandler())) { - Map<String, Object> map = parser.map(); - bucketsPathsMap = new HashMap<>(); - for (Map.Entry<String, Object> entry : map.entrySet()) { - bucketsPathsMap.put(entry.getKey(), String.valueOf(entry.getValue())); - } - } else { - throw new ParsingException(parser.getTokenLocation(), - "Unknown key for a " + token + " in [" + reducerName + "]: [" + currentFieldName + "]."); - } - } else { - throw new ParsingException(parser.getTokenLocation(), "Unexpected token " + token + " in [" + reducerName + "]."); - } - } - - if (bucketsPathsMap == null) { - throw new ParsingException(parser.getTokenLocation(), "Missing
required field [" + BUCKETS_PATH.getPreferredName() - + "] for series_arithmetic aggregation [" + reducerName + "]"); - } - - if (script == null) { - throw new ParsingException(parser.getTokenLocation(), "Missing required field [" + Script.SCRIPT_PARSE_FIELD.getPreferredName() - + "] for series_arithmetic aggregation [" + reducerName + "]"); - } - - BucketScriptPipelineAggregationBuilder factory = - new BucketScriptPipelineAggregationBuilder(reducerName, bucketsPathsMap, script); - if (format != null) { - factory.format(format); - } - if (gapPolicy != null) { - factory.gapPolicy(gapPolicy); - } - return factory; + public static BucketScriptPipelineAggregationBuilder parse(String aggName, XContentParser parser) { + return PARSER.apply(aggName).apply(parser, null); } - @Override protected boolean overrideBucketsPath() { return true; diff --git a/server/src/main/java/org/elasticsearch/search/dfs/AggregatedDfs.java b/server/src/main/java/org/elasticsearch/search/dfs/AggregatedDfs.java index 19da9ea9b9100..b8321b0acd7a3 100644 --- a/server/src/main/java/org/elasticsearch/search/dfs/AggregatedDfs.java +++ b/server/src/main/java/org/elasticsearch/search/dfs/AggregatedDfs.java @@ -28,17 +28,28 @@ import org.elasticsearch.common.collect.HppcMaps; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import java.io.IOException; -public class AggregatedDfs implements Streamable { +public class AggregatedDfs implements Writeable { private ObjectObjectHashMap termStatistics; private ObjectObjectHashMap fieldStatistics; private long maxDoc; - private AggregatedDfs() { + public AggregatedDfs(StreamInput in) throws IOException { + int size = in.readVInt(); + termStatistics = HppcMaps.newMap(size); + for (int i = 0; i < size; i++) { + Term term = new Term(in.readString(), in.readBytesRef()); + TermStatistics stats = new TermStatistics(in.readBytesRef(), + in.readVLong(), + DfsSearchResult.subOne(in.readVLong())); + termStatistics.put(term, stats); + } + fieldStatistics = DfsSearchResult.readFieldStats(in); + maxDoc = in.readVLong(); } public AggregatedDfs(ObjectObjectHashMap termStatistics, @@ -60,27 +71,6 @@ public long maxDoc() { return maxDoc; } - public static AggregatedDfs readAggregatedDfs(StreamInput in) throws IOException { - AggregatedDfs result = new AggregatedDfs(); - result.readFrom(in); - return result; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - int size = in.readVInt(); - termStatistics = HppcMaps.newMap(size); - for (int i = 0; i < size; i++) { - Term term = new Term(in.readString(), in.readBytesRef()); - TermStatistics stats = new TermStatistics(in.readBytesRef(), - in.readVLong(), - DfsSearchResult.subOne(in.readVLong())); - termStatistics.put(term, stats); - } - fieldStatistics = DfsSearchResult.readFieldStats(in); - maxDoc = in.readVLong(); - } - @Override public void writeTo(final StreamOutput out) throws IOException { out.writeVInt(termStatistics.size()); diff --git a/server/src/main/java/org/elasticsearch/search/fetch/ShardFetchRequest.java b/server/src/main/java/org/elasticsearch/search/fetch/ShardFetchRequest.java index 6957eea4758d9..cb2bc99370bca 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/ShardFetchRequest.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/ShardFetchRequest.java @@ -110,11 +110,6 @@ public ScoreDoc lastEmittedDoc() { return 
lastEmittedDoc; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { return new SearchTask(id, type, action, getDescription(), parentTaskId, headers); diff --git a/server/src/main/java/org/elasticsearch/search/fetch/ShardFetchSearchRequest.java b/server/src/main/java/org/elasticsearch/search/fetch/ShardFetchSearchRequest.java index b81d9eded9cd6..647d629a390c6 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/ShardFetchSearchRequest.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/ShardFetchSearchRequest.java @@ -73,8 +73,4 @@ public IndicesOptions indicesOptions() { return originalIndices.indicesOptions(); } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } -} diff --git a/server/src/main/java/org/elasticsearch/search/fetch/StoredFieldsContext.java b/server/src/main/java/org/elasticsearch/search/fetch/StoredFieldsContext.java index 8cb175ae156c0..9ac90806aa4c7 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/StoredFieldsContext.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/StoredFieldsContext.java @@ -67,7 +67,7 @@ public StoredFieldsContext(StoredFieldsContext other) { public StoredFieldsContext(StreamInput in) throws IOException { this.fetchFields = in.readBoolean(); if (fetchFields) { - this.fieldNames = (List) in.readGenericValue(); + this.fieldNames = new ArrayList<>((List) in.readGenericValue()); } else { this.fieldNames = null; } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightField.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightField.java index 8b839a7e8c07e..13b4387b3911c 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightField.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightField.java @@ -22,7 +22,7 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -39,13 +39,25 @@ /** * A field highlighted with its highlighted fragments. 
*/ -public class HighlightField implements ToXContentFragment, Streamable { +public class HighlightField implements ToXContentFragment, Writeable { private String name; private Text[] fragments; - HighlightField() { + public HighlightField(StreamInput in) throws IOException { + name = in.readString(); + if (in.readBoolean()) { + int size = in.readVInt(); + if (size == 0) { + fragments = Text.EMPTY_ARRAY; + } else { + fragments = new Text[size]; + for (int i = 0; i < size; i++) { + fragments[i] = in.readText(); + } + } + } } public HighlightField(String name, Text[] fragments) { @@ -86,28 +98,6 @@ public String toString() { return "[" + name + "], fragments[" + Arrays.toString(fragments) + "]"; } - public static HighlightField readHighlightField(StreamInput in) throws IOException { - HighlightField field = new HighlightField(); - field.readFrom(in); - return field; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - name = in.readString(); - if (in.readBoolean()) { - int size = in.readVInt(); - if (size == 0) { - fragments = Text.EMPTY_ARRAY; - } else { - fragments = new Text[size]; - for (int i = 0; i < size; i++) { - fragments[i] = in.readText(); - } - } - } - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); diff --git a/server/src/main/java/org/elasticsearch/search/internal/InternalScrollSearchRequest.java b/server/src/main/java/org/elasticsearch/search/internal/InternalScrollSearchRequest.java index c551205f6b5db..9d7ba557cc260 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/InternalScrollSearchRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/InternalScrollSearchRequest.java @@ -71,11 +71,6 @@ public InternalScrollSearchRequest scroll(Scroll scroll) { return this; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { return new SearchTask(id, type, action, getDescription(), parentTaskId, headers); diff --git a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java index 07557d9459973..0cb7b7401bfba 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java @@ -158,11 +158,6 @@ public String preference() { return shardSearchLocalRequest.preference(); } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public BytesReference cacheKey() throws IOException { return shardSearchLocalRequest.cacheKey(); diff --git a/server/src/main/java/org/elasticsearch/search/query/QuerySearchRequest.java b/server/src/main/java/org/elasticsearch/search/query/QuerySearchRequest.java index dbb14fda71783..e458603310ce4 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QuerySearchRequest.java +++ b/server/src/main/java/org/elasticsearch/search/query/QuerySearchRequest.java @@ -34,8 +34,6 @@ import java.io.IOException; import java.util.Map; -import static org.elasticsearch.search.dfs.AggregatedDfs.readAggregatedDfs; - public class QuerySearchRequest extends 
TransportRequest implements IndicesRequest { private long id; @@ -56,7 +54,7 @@ public QuerySearchRequest(OriginalIndices originalIndices, long id, AggregatedDf public QuerySearchRequest(StreamInput in) throws IOException { super(in); id = in.readLong(); - dfs = readAggregatedDfs(in); + dfs = new AggregatedDfs(in); originalIndices = OriginalIndices.readOriginalIndices(in); } @@ -86,11 +84,6 @@ public IndicesOptions indicesOptions() { return originalIndices.indicesOptions(); } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { return new SearchTask(id, type, action, getDescription(), parentTaskId, headers); diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionStats.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionStats.java index 943733c6423d3..dec01d379f7e2 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionStats.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionStats.java @@ -22,7 +22,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.ToXContentFragment; @@ -30,7 +29,7 @@ import java.io.IOException; -public class CompletionStats implements Streamable, Writeable, ToXContentFragment { +public class CompletionStats implements Writeable, ToXContentFragment { private static final String COMPLETION = "completion"; private static final String SIZE_IN_BYTES = "size_in_bytes"; @@ -66,11 +65,6 @@ public FieldMemoryStats getFields() { return fields; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVLong(sizeInBytes); diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreInfo.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreInfo.java index f76a8fe3e88fe..002a8ba972b0e 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreInfo.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreInfo.java @@ -22,7 +22,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -39,7 +39,7 @@ *

* Returned as part of {@link org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse} */ -public class RestoreInfo implements ToXContentObject, Streamable { +public class RestoreInfo implements ToXContentObject, Writeable { private String name; @@ -49,8 +49,7 @@ public class RestoreInfo implements ToXContentObject, Streamable { private int successfulShards; - RestoreInfo() { - } + RestoreInfo() {} public RestoreInfo(String name, List indices, int totalShards, int successfulShards) { this.name = name; @@ -59,6 +58,18 @@ public RestoreInfo(String name, List indices, int totalShards, int succe this.successfulShards = successfulShards; } + public RestoreInfo(StreamInput in) throws IOException { + name = in.readString(); + int size = in.readVInt(); + List indicesListBuilder = new ArrayList<>(); + for (int i = 0; i < size; i++) { + indicesListBuilder.add(in.readString()); + } + indices = Collections.unmodifiableList(indicesListBuilder); + totalShards = in.readVInt(); + successfulShards = in.readVInt(); + } + /** * Snapshot name * @@ -149,19 +160,6 @@ public static RestoreInfo fromXContent(XContentParser parser) throws IOException return PARSER.parse(parser, null); } - @Override - public void readFrom(StreamInput in) throws IOException { - name = in.readString(); - int size = in.readVInt(); - List indicesListBuilder = new ArrayList<>(); - for (int i = 0; i < size; i++) { - indicesListBuilder.add(in.readString()); - } - indices = Collections.unmodifiableList(indicesListBuilder); - totalShards = in.readVInt(); - successfulShards = in.readVInt(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); @@ -180,7 +178,7 @@ public void writeTo(StreamOutput out) throws IOException { * @return restore info */ public static RestoreInfo readOptionalRestoreInfo(StreamInput in) throws IOException { - return in.readOptionalStreamable(RestoreInfo::new); + return in.readOptionalWriteable(RestoreInfo::new); } @Override diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotId.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotId.java index 59e1d960bcbfc..4a03ebc032fa1 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotId.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotId.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; import java.util.Objects; @@ -129,19 +128,4 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endObject(); return builder; } - - public static SnapshotId fromXContent(XContentParser parser) throws IOException { - String name = null; - String uuid = null; - while (parser.nextToken() != XContentParser.Token.END_OBJECT) { - String currentFieldName = parser.currentName(); - parser.nextToken(); - if (NAME.equals(currentFieldName)) { - name = parser.text(); - } else if (UUID.equals(currentFieldName)) { - uuid = parser.text(); - } - } - return new SnapshotId(name, uuid); - } } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java index 3c7007bd27f17..48d00a619f450 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java +++ 
b/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java @@ -301,7 +301,7 @@ public SnapshotInfo(final StreamInput in) throws IOException { if (size > 0) { List failureBuilder = new ArrayList<>(); for (int i = 0; i < size; i++) { - failureBuilder.add(SnapshotShardFailure.readSnapshotShardFailure(in)); + failureBuilder.add(new SnapshotShardFailure(in)); } shardFailures = Collections.unmodifiableList(failureBuilder); } else { @@ -316,17 +316,6 @@ public SnapshotInfo(final StreamInput in) throws IOException { } } - /** - * Gets a new {@link SnapshotInfo} instance for a snapshot that is incompatible with the - * current version of the cluster. - */ - public static SnapshotInfo incompatible(SnapshotId snapshotId) { - return new SnapshotInfo(snapshotId, Collections.emptyList(), SnapshotState.INCOMPATIBLE, - "the snapshot is incompatible with the current version of Elasticsearch and its exact version is unknown", - null, 0L, 0L, 0, 0, - Collections.emptyList(), null, null); - } - /** * Gets a new {@link SnapshotInfo} instance from the given {@link SnapshotInfo} with * all information stripped out except the snapshot id, state, and indices. diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardFailure.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardFailure.java index d95fdb0a55692..d51130b6f113d 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardFailure.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardFailure.java @@ -45,8 +45,13 @@ public class SnapshotShardFailure extends ShardOperationFailedException { private String nodeId; private ShardId shardId; - private SnapshotShardFailure() { - + SnapshotShardFailure(StreamInput in) throws IOException { + nodeId = in.readOptionalString(); + shardId = new ShardId(in); + super.shardId = shardId.getId(); + index = shardId.getIndexName(); + reason = in.readString(); + status = RestStatus.readFrom(in); } /** @@ -84,28 +89,6 @@ public String nodeId() { return nodeId; } - /** - * Reads shard failure information from stream input - * - * @param in stream input - * @return shard failure information - */ - static SnapshotShardFailure readSnapshotShardFailure(StreamInput in) throws IOException { - SnapshotShardFailure exp = new SnapshotShardFailure(); - exp.readFrom(in); - return exp; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - nodeId = in.readOptionalString(); - shardId = new ShardId(in); - super.shardId = shardId.getId(); - index = shardId.getIndexName(); - reason = in.readString(); - status = RestStatus.readFrom(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(nodeId); diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index c2a15d7fffc51..7d59d01e72d46 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -428,8 +428,13 @@ public static class UpdateIndexShardSnapshotStatusRequest extends MasterNodeRequ private ShardId shardId; private ShardSnapshotStatus status; - public UpdateIndexShardSnapshotStatusRequest() { + public UpdateIndexShardSnapshotStatusRequest() {} + public UpdateIndexShardSnapshotStatusRequest(StreamInput in) throws IOException { + super(in); + snapshot = new Snapshot(in); + shardId = new ShardId(in); + status = new 
ShardSnapshotStatus(in); } public UpdateIndexShardSnapshotStatusRequest(Snapshot snapshot, ShardId shardId, ShardSnapshotStatus status) { @@ -445,14 +450,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - snapshot = new Snapshot(in); - shardId = new ShardId(in); - status = new ShardSnapshotStatus(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -511,9 +508,7 @@ public void onFailure(Exception e) { new TransportResponseHandler() { @Override public UpdateIndexShardSnapshotStatusResponse read(StreamInput in) throws IOException { - final UpdateIndexShardSnapshotStatusResponse response = new UpdateIndexShardSnapshotStatusResponse(); - response.readFrom(in); - return response; + return new UpdateIndexShardSnapshotStatusResponse(in); } @Override @@ -611,6 +606,13 @@ private static class SnapshotStateExecutor implements ClusterStateTaskExecutor * Tasks are used for communication with transport actions. As a result, they can contain callback * references as well as mutable state. That makes it impractical to send tasks over transport channels - * and use in APIs. Instead, immutable and streamable TaskInfo objects are used to represent + * and use in APIs. Instead, immutable and writeable TaskInfo objects are used to represent * snapshot information about currently running tasks. */ public final class TaskInfo implements Writeable, ToXContentFragment { diff --git a/server/src/main/java/org/elasticsearch/transport/BytesTransportRequest.java b/server/src/main/java/org/elasticsearch/transport/BytesTransportRequest.java index 0167865af9e29..941028dd84c31 100644 --- a/server/src/main/java/org/elasticsearch/transport/BytesTransportRequest.java +++ b/server/src/main/java/org/elasticsearch/transport/BytesTransportRequest.java @@ -35,8 +35,10 @@ public class BytesTransportRequest extends TransportRequest { BytesReference bytes; Version version; - public BytesTransportRequest() { - + public BytesTransportRequest(StreamInput in) throws IOException { + super(in); + bytes = in.readBytesReference(); + version = in.getVersion(); } public BytesTransportRequest(BytesReference bytes, Version version) { @@ -52,13 +54,6 @@ public BytesReference bytes() { return this.bytes; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - bytes = in.readBytesReference(); - version = in.getVersion(); - } - /** * Writes the data in a "thin" manner, without the actual bytes, assumes * the actual bytes will be appended right after this content. diff --git a/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java b/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java index 3c31cddb39945..ed26d0b07cdba 100644 --- a/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java +++ b/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java @@ -33,9 +33,11 @@ import java.io.Closeable; import java.util.ArrayList; +import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; @@ -238,6 +240,13 @@ public int size() { return connectedNodes.size(); } + /** + * Returns the set of nodes this manager is connected to. 
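A note on the recurring pattern in the hunks above (RestoreInfo, SnapshotShardFailure, UpdateIndexShardSnapshotStatusRequest): the Streamable pair of a no-arg constructor plus readFrom(StreamInput) is replaced by a single StreamInput constructor alongside the existing writeTo(StreamOutput). A minimal self-contained sketch of the resulting shape, using a hypothetical ExampleInfo class that is not part of this PR (the stream calls mirror the RestoreInfo hunk):

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

// Hypothetical example class: the post-migration shape of a wire-serializable
// object. All deserialization happens in the constructor, so there is no
// partially-initialized readFrom() phase and the fields can be final.
public class ExampleInfo implements Writeable {

    private final String name;
    private final List<String> indices;
    private final int totalShards;

    public ExampleInfo(String name, List<String> indices, int totalShards) {
        this.name = name;
        this.indices = indices;
        this.totalShards = totalShards;
    }

    // Replaces the old no-arg constructor + readFrom(StreamInput) pair.
    public ExampleInfo(StreamInput in) throws IOException {
        name = in.readString();
        indices = Collections.unmodifiableList(Arrays.asList(in.readStringArray()));
        totalShards = in.readVInt();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(name);
        out.writeStringArray(indices.toArray(new String[0]));
        out.writeVInt(totalShards);
    }
}

Callers correspondingly switch from in.readOptionalStreamable(ExampleInfo::new) to in.readOptionalWriteable(ExampleInfo::new), matching the readOptionalRestoreInfo hunk above.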
+ */ + public Set connectedNodes() { + return Collections.unmodifiableSet(connectedNodes.keySet()); + } + @Override public void close() { assert Transports.assertNotTransportThread("Closing ConnectionManager"); diff --git a/server/src/main/java/org/elasticsearch/transport/InboundMessage.java b/server/src/main/java/org/elasticsearch/transport/InboundMessage.java index 318e22701627d..ab07c772f927e 100644 --- a/server/src/main/java/org/elasticsearch/transport/InboundMessage.java +++ b/server/src/main/java/org/elasticsearch/transport/InboundMessage.java @@ -19,10 +19,10 @@ package org.elasticsearch.transport; import org.elasticsearch.Version; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.Compressor; import org.elasticsearch.common.compress.CompressorFactory; -import org.elasticsearch.common.compress.NotCompressedException; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; @@ -75,11 +75,8 @@ InboundMessage deserialize(BytesReference reference) throws IOException { final boolean isHandshake = TransportStatus.isHandshake(status); ensureVersionCompatibility(remoteVersion, version, isHandshake); if (TransportStatus.isCompress(status) && hasMessageBytesToRead && streamInput.available() > 0) { - Compressor compressor; - try { - final int bytesConsumed = TcpHeader.REQUEST_ID_SIZE + TcpHeader.STATUS_SIZE + TcpHeader.VERSION_ID_SIZE; - compressor = CompressorFactory.compressor(reference.slice(bytesConsumed, reference.length() - bytesConsumed)); - } catch (NotCompressedException ex) { + Compressor compressor = getCompressor(reference); + if (compressor == null) { int maxToRead = Math.min(reference.length(), 10); StringBuilder sb = new StringBuilder("stream marked as compressed, but no compressor found, first [") .append(maxToRead).append("] content bytes out of [").append(reference.length()) @@ -99,7 +96,13 @@ InboundMessage deserialize(BytesReference reference) throws IOException { InboundMessage message; if (TransportStatus.isRequest(status)) { - final Set features = Collections.unmodifiableSet(new TreeSet<>(Arrays.asList(streamInput.readStringArray()))); + final String[] featuresFound = streamInput.readStringArray(); + final Set features; + if (featuresFound.length == 0) { + features = Collections.emptySet(); + } else { + features = Collections.unmodifiableSet(new TreeSet<>(Arrays.asList(featuresFound))); + } final String action = streamInput.readString(); message = new Request(threadContext, remoteVersion, status, requestId, action, features, streamInput); } else { @@ -115,6 +118,13 @@ InboundMessage deserialize(BytesReference reference) throws IOException { } } + @Nullable + static Compressor getCompressor(BytesReference message) { + final int offset = TcpHeader.REQUEST_ID_SIZE + TcpHeader.STATUS_SIZE + TcpHeader.VERSION_ID_SIZE; + return CompressorFactory.COMPRESSOR.isCompressed(message.slice(offset, message.length() - offset)) + ? 
CompressorFactory.COMPRESSOR : null; + } + @Override public void close() throws IOException { streamInput.close(); diff --git a/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java b/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java index 4b816c6a065e5..7438a399a9206 100644 --- a/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java +++ b/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java @@ -40,7 +40,6 @@ import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; -import java.util.Set; final class OutboundHandler { @@ -95,13 +94,12 @@ void sendRequest(final DiscoveryNode node, final TcpChannel channel, final long * Sends the response to the given channel. This method should be used to send {@link TransportResponse} * objects back to the caller. * - * @see #sendErrorResponse(Version, Set, TcpChannel, long, String, Exception) for sending error responses + * @see #sendErrorResponse(Version, TcpChannel, long, String, Exception) for sending error responses */ - void sendResponse(final Version nodeVersion, final Set features, final TcpChannel channel, - final long requestId, final String action, final TransportResponse response, - final boolean compress, final boolean isHandshake) throws IOException { + void sendResponse(final Version nodeVersion, final TcpChannel channel, final long requestId, final String action, + final TransportResponse response, final boolean compress, final boolean isHandshake) throws IOException { Version version = Version.min(this.version, nodeVersion); - OutboundMessage.Response message = new OutboundMessage.Response(threadPool.getThreadContext(), features, response, version, + OutboundMessage.Response message = new OutboundMessage.Response(threadPool.getThreadContext(), response, version, requestId, isHandshake, compress); ActionListener listener = ActionListener.wrap(() -> messageListener.onResponseSent(requestId, action, response)); sendMessage(channel, message, listener); @@ -110,12 +108,12 @@ void sendResponse(final Version nodeVersion, final Set features, final T /** * Sends back an error response to the caller via the given channel */ - void sendErrorResponse(final Version nodeVersion, final Set features, final TcpChannel channel, final long requestId, - final String action, final Exception error) throws IOException { + void sendErrorResponse(final Version nodeVersion, final TcpChannel channel, final long requestId, final String action, + final Exception error) throws IOException { Version version = Version.min(this.version, nodeVersion); TransportAddress address = new TransportAddress(channel.getLocalAddress()); RemoteTransportException tx = new RemoteTransportException(nodeName, address, action, error); - OutboundMessage.Response message = new OutboundMessage.Response(threadPool.getThreadContext(), features, tx, version, requestId, + OutboundMessage.Response message = new OutboundMessage.Response(threadPool.getThreadContext(), tx, version, requestId, false, false); ActionListener listener = ActionListener.wrap(() -> messageListener.onResponseSent(requestId, action, error)); sendMessage(channel, message, listener); diff --git a/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java b/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java index f107f4c1306e5..dbea9991db5aa 100644 --- a/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java +++ b/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java @@ -23,14 +23,12 
@@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.CompositeBytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.concurrent.ThreadContext; import java.io.IOException; -import java.util.Set; -abstract class OutboundMessage extends NetworkMessage implements Writeable { +abstract class OutboundMessage extends NetworkMessage { private final Writeable message; @@ -49,7 +47,6 @@ BytesReference serialize(BytesStreamOutput bytesStream) throws IOException { try (CompressibleBytesOutputStream stream = new CompressibleBytesOutputStream(bytesStream, TransportStatus.isCompress(status))) { stream.setVersion(version); threadContext.writeTo(stream); - writeTo(stream); reference = writeMessage(stream); } bytesStream.seek(0); @@ -57,7 +54,7 @@ BytesReference serialize(BytesStreamOutput bytesStream) throws IOException { return reference; } - private BytesReference writeMessage(CompressibleBytesOutputStream stream) throws IOException { + protected BytesReference writeMessage(CompressibleBytesOutputStream stream) throws IOException { final BytesReference zeroCopyBuffer; if (message instanceof BytesTransportRequest) { BytesTransportRequest bRequest = (BytesTransportRequest) message; @@ -96,9 +93,10 @@ static class Request extends OutboundMessage { } @Override - public void writeTo(StreamOutput out) throws IOException { + protected BytesReference writeMessage(CompressibleBytesOutputStream out) throws IOException { out.writeStringArray(features); out.writeString(action); + return super.writeMessage(out); } private static byte setStatus(boolean compress, boolean isHandshake, Writeable message) { @@ -117,17 +115,8 @@ private static byte setStatus(boolean compress, boolean isHandshake, Writeable m static class Response extends OutboundMessage { - private final Set features; - - Response(ThreadContext threadContext, Set features, Writeable message, Version version, long requestId, - boolean isHandshake, boolean compress) { + Response(ThreadContext threadContext, Writeable message, Version version, long requestId, boolean isHandshake, boolean compress) { super(threadContext, version, setStatus(compress, isHandshake, message), requestId, message); - this.features = features; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.setFeatures(features); } private static byte setStatus(boolean compress, boolean isHandshake, Writeable message) { diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index 8587e963c26d9..e94e7ded21657 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -50,16 +50,15 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.HashSet; import java.util.Iterator; import java.util.List; -import java.util.Set; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.BlockingQueue; import java.util.concurrent.ExecutorService; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.Semaphore; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; import java.util.function.Function; 
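The java.util.concurrent.atomic.AtomicLong imported here backs the round-robin replacement for the removed ConnectedNodes helper further down in this file. A stand-alone sketch of that selection idiom, under hypothetical names (the real getAnyConnectedNode() reads its candidate list from connectionManager.connectedNodes()):

import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

// Hypothetical stand-alone version of the idiom: a monotonically increasing
// counter mapped onto the current candidate list with Math.floorMod, which
// stays non-negative even after the counter overflows and wraps negative.
final class RoundRobinPicker<T> {

    private final AtomicLong nextId = new AtomicLong();

    T pickAny(List<T> candidates) {
        if (candidates.isEmpty()) {
            throw new IllegalStateException("no candidates available");
        }
        long curr;
        // Long.MIN_VALUE is skipped defensively, mirroring the hunk below;
        // Math.abs(Long.MIN_VALUE) is itself negative, the usual hazard
        // when a wrapped counter is turned into an index.
        while ((curr = nextId.incrementAndGet()) == Long.MIN_VALUE);
        return candidates.get(Math.floorMod(curr, candidates.size()));
    }
}

Compared with the removed synchronized ConnectedNodes iterator, this needs no locking: the only shared mutable state is the counter.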
import java.util.function.Predicate; import java.util.function.Supplier; @@ -84,7 +83,6 @@ final class RemoteClusterConnection implements TransportConnectionListener, Clos private final TransportService transportService; private final ConnectionManager connectionManager; - private final ConnectedNodes connectedNodes; private final String clusterAlias; private final int maxNumRemoteConnections; private final Predicate nodePredicate; @@ -94,7 +92,7 @@ final class RemoteClusterConnection implements TransportConnectionListener, Clos private volatile boolean skipUnavailable; private final ConnectHandler connectHandler; private final TimeValue initialConnectionTimeout; - private SetOnce remoteClusterName = new SetOnce<>(); + private final SetOnce remoteClusterName = new SetOnce<>(); /** * Creates a new {@link RemoteClusterConnection} @@ -123,7 +121,6 @@ final class RemoteClusterConnection implements TransportConnectionListener, Clos this.nodePredicate = nodePredicate; this.clusterAlias = clusterAlias; this.connectionManager = connectionManager; - this.connectedNodes = new ConnectedNodes(clusterAlias); this.seedNodes = Collections.unmodifiableList(seedNodes); this.skipUnavailable = RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE .getConcreteSettingForNamespace(clusterAlias).get(settings); @@ -176,8 +173,7 @@ boolean isSkipUnavailable() { @Override public void onNodeDisconnected(DiscoveryNode node) { - boolean remove = connectedNodes.remove(node); - if (remove && connectedNodes.size() < maxNumRemoteConnections) { + if (connectionManager.size() < maxNumRemoteConnections) { // try to reconnect and fill up the slot of the disconnected node connectHandler.forceConnect(); } @@ -188,7 +184,7 @@ public void onNodeDisconnected(DiscoveryNode node) { * will invoke the listener immediately. */ void ensureConnected(ActionListener voidActionListener) { - if (connectedNodes.size() == 0) { + if (connectionManager.size() == 0) { connectHandler.connect(voidActionListener); } else { voidActionListener.onResponse(null); @@ -212,9 +208,7 @@ void collectNodes(ActionListener> listener) { @Override public ClusterStateResponse read(StreamInput in) throws IOException { - ClusterStateResponse response = new ClusterStateResponse(); - response.readFrom(in); - return response; + return new ClusterStateResponse(in); } @Override @@ -259,6 +253,22 @@ Transport.Connection getConnection(DiscoveryNode remoteClusterNode) { return new ProxyConnection(connection, remoteClusterNode); } + private Predicate getRemoteClusterNamePredicate() { + return + new Predicate() { + @Override + public boolean test(ClusterName c) { + return remoteClusterName.get() == null || c.equals(remoteClusterName.get()); + } + + @Override + public String toString() { + return remoteClusterName.get() == null ? "any cluster name" + : "expected remote cluster name [" + remoteClusterName.get().value() + "]"; + } + }; + } + static final class ProxyConnection implements Transport.Connection { private final Transport.Connection proxyConnection; @@ -458,22 +468,20 @@ private void collectRemoteNodes(Iterator> seedNodes, fin ConnectionProfile connectionProfile = connectionManager.getConnectionProfile(); handshakeResponse = PlainActionFuture.get(fut -> transportService.handshake(connection, connectionProfile.getHandshakeTimeout().millis(), - (c) -> remoteClusterName.get() == null ? 
true : c.equals(remoteClusterName.get()), fut)); + getRemoteClusterNamePredicate(), fut)); } catch (IllegalStateException ex) { - logger.warn(() -> new ParameterizedMessage("seed node {} cluster name mismatch expected " + - "cluster name {}", connection.getNode(), remoteClusterName.get()), ex); + logger.warn(new ParameterizedMessage("failed to connect to seed node [{}]", connection.getNode()), ex); throw ex; } final DiscoveryNode handshakeNode = maybeAddProxyAddress(proxyAddress, handshakeResponse.getDiscoveryNode()); - if (nodePredicate.test(handshakeNode) && connectedNodes.size() < maxNumRemoteConnections) { + if (nodePredicate.test(handshakeNode) && manager.size() < maxNumRemoteConnections) { PlainActionFuture.get(fut -> manager.connectToNode(handshakeNode, null, transportService.connectionValidator(handshakeNode), ActionListener.map(fut, x -> null))); if (remoteClusterName.get() == null) { assert handshakeResponse.getClusterName().value() != null; remoteClusterName.set(handshakeResponse.getClusterName()); } - connectedNodes.add(handshakeNode); } ClusterStateRequest request = new ClusterStateRequest(); request.clear(); @@ -557,9 +565,7 @@ private class SniffClusterStateResponseHandler implements TransportResponseHandl @Override public ClusterStateResponse read(StreamInput in) throws IOException { - ClusterStateResponse response = new ClusterStateResponse(); - response.readFrom(in); - return response; + return new ClusterStateResponse(in); } @Override @@ -580,12 +586,11 @@ public void handleResponse(ClusterStateResponse response) { Iterable nodesIter = nodes.getNodes()::valuesIt; for (DiscoveryNode n : nodesIter) { DiscoveryNode node = maybeAddProxyAddress(proxyAddress, n); - if (nodePredicate.test(node) && connectedNodes.size() < maxNumRemoteConnections) { + if (nodePredicate.test(node) && connectionManager.size() < maxNumRemoteConnections) { try { // noop if node is connected PlainActionFuture.get(fut -> connectionManager.connectToNode(node, null, transportService.connectionValidator(node), ActionListener.map(fut, x -> null))); - connectedNodes.add(node); } catch (ConnectTransportException | IllegalStateException ex) { // ISE if we fail the handshake with an version incompatible node // fair enough we can't connect just move on @@ -628,15 +633,20 @@ boolean assertNoRunningConnections() { // for testing only } boolean isNodeConnected(final DiscoveryNode node) { - return connectedNodes.contains(node); + return connectionManager.nodeConnected(node); } - DiscoveryNode getAnyConnectedNode() { - return connectedNodes.getAny(); - } + private final AtomicLong nextNodeId = new AtomicLong(); - void addConnectedNode(DiscoveryNode node) { - connectedNodes.add(node); + DiscoveryNode getAnyConnectedNode() { + List nodes = new ArrayList<>(connectionManager.connectedNodes()); + if (nodes.isEmpty()) { + throw new NoSuchRemoteClusterException(clusterAlias); + } else { + long curr; + while ((curr = nextNodeId.incrementAndGet()) == Long.MIN_VALUE); + return nodes.get(Math.floorMod(curr, nodes.size())); + } } /** @@ -647,67 +657,13 @@ public RemoteConnectionInfo getConnectionInfo() { clusterAlias, seedNodes.stream().map(Tuple::v1).collect(Collectors.toList()), maxNumRemoteConnections, - connectedNodes.size(), + getNumNodesConnected(), initialConnectionTimeout, skipUnavailable); } int getNumNodesConnected() { - return connectedNodes.size(); - } - - private static final class ConnectedNodes { - - private final Set nodeSet = new HashSet<>(); - private final String clusterAlias; - - private Iterator 
currentIterator = null; - - private ConnectedNodes(String clusterAlias) { - this.clusterAlias = clusterAlias; - } - - public synchronized DiscoveryNode getAny() { - ensureIteratorAvailable(); - if (currentIterator.hasNext()) { - return currentIterator.next(); - } else { - throw new NoSuchRemoteClusterException(clusterAlias); - } - } - - synchronized boolean remove(DiscoveryNode node) { - final boolean setRemoval = nodeSet.remove(node); - if (setRemoval) { - currentIterator = null; - } - return setRemoval; - } - - synchronized boolean add(DiscoveryNode node) { - final boolean added = nodeSet.add(node); - if (added) { - currentIterator = null; - } - return added; - } - - synchronized int size() { - return nodeSet.size(); - } - - synchronized boolean contains(DiscoveryNode node) { - return nodeSet.contains(node); - } - - private synchronized void ensureIteratorAvailable() { - if (currentIterator == null) { - currentIterator = nodeSet.iterator(); - } else if (currentIterator.hasNext() == false && nodeSet.isEmpty() == false) { - // iterator rollover - currentIterator = nodeSet.iterator(); - } - } + return connectionManager.size(); } private static ConnectionManager createConnectionManager(ConnectionProfile connectionProfile, TransportService transportService) { diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index 7ebda8336bd7f..b0abc88848584 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -156,7 +156,7 @@ public TcpTransport(Settings settings, Version version, ThreadPool threadPool, P (node, channel, requestId, v) -> outboundHandler.sendRequest(node, channel, requestId, TransportHandshaker.HANDSHAKE_ACTION_NAME, new TransportHandshaker.HandshakeRequest(version), TransportRequestOptions.EMPTY, v, false, true), - (v, features1, channel, response, requestId) -> outboundHandler.sendResponse(v, features1, channel, requestId, + (v, features1, channel, response, requestId) -> outboundHandler.sendResponse(v, channel, requestId, TransportHandshaker.HANDSHAKE_ACTION_NAME, response, false, true)); InboundMessage.Reader reader = new InboundMessage.Reader(version, namedWriteableRegistry, threadPool.getThreadContext()); this.keepAlive = new TransportKeepAlive(threadPool, this.outboundHandler::sendBytes); diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransportChannel.java b/server/src/main/java/org/elasticsearch/transport/TcpTransportChannel.java index aab6e25001ddc..315b67e92e9d8 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransportChannel.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransportChannel.java @@ -61,7 +61,7 @@ public String getProfileName() { @Override public void sendResponse(TransportResponse response) throws IOException { try { - outboundHandler.sendResponse(version, features, channel, requestId, action, response, compressResponse, false); + outboundHandler.sendResponse(version, channel, requestId, action, response, compressResponse, false); } finally { release(false); } @@ -70,7 +70,7 @@ public void sendResponse(TransportResponse response) throws IOException { @Override public void sendResponse(Exception exception) throws IOException { try { - outboundHandler.sendErrorResponse(version, features, channel, requestId, action, exception); + outboundHandler.sendErrorResponse(version, channel, requestId, action, exception); } finally 
{ release(true); } diff --git a/server/src/main/java/org/elasticsearch/transport/Transport.java b/server/src/main/java/org/elasticsearch/transport/Transport.java index e81fb9c380e9b..32499ce19d576 100644 --- a/server/src/main/java/org/elasticsearch/transport/Transport.java +++ b/server/src/main/java/org/elasticsearch/transport/Transport.java @@ -22,13 +22,12 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.breaker.CircuitBreaker; -import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.ConcurrentMapLong; + import java.io.Closeable; import java.io.IOException; import java.net.UnknownHostException; @@ -53,6 +52,10 @@ public interface Transport extends LifecycleComponent { void setMessageListener(TransportMessageListener listener); + default boolean isSecure() { + return false; + } + /** * The address the transport is bound on. */ @@ -74,10 +77,6 @@ public interface Transport extends LifecycleComponent { */ List getDefaultSeedAddresses(); - default CircuitBreaker getInFlightRequestBreaker() { - return new NoopCircuitBreaker("in-flight-noop"); - } - /** * Opens a new connection to the given node. When the connection is fully connected, the listener is called. * The ActionListener will be called on the calling thread or the generic thread pool. diff --git a/server/src/main/java/org/elasticsearch/transport/TransportActionProxy.java b/server/src/main/java/org/elasticsearch/transport/TransportActionProxy.java index e1e3c25f083cf..ff351743b5999 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportActionProxy.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportActionProxy.java @@ -114,11 +114,6 @@ static class ProxyRequest extends TransportRequest { wrapped = reader.read(in); } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java b/server/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java deleted file mode 100644 index 6b45feec94859..0000000000000 --- a/server/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.transport; - -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.threadpool.ThreadPool; - -import java.io.IOException; - -/** - * Base class for delegating transport response to a transport channel - */ -public class TransportChannelResponseHandler implements TransportResponseHandler { - - private final Logger logger; - private final TransportChannel channel; - private final String extraInfoOnError; - private final Writeable.Reader reader; - - public TransportChannelResponseHandler(Logger logger, TransportChannel channel, String extraInfoOnError, - Writeable.Reader reader) { - this.logger = logger; - this.channel = channel; - this.extraInfoOnError = extraInfoOnError; - this.reader = reader; - } - - @Override - public T read(StreamInput in) throws IOException { - return reader.read(in); - } - - @Override - public void handleResponse(T response) { - try { - channel.sendResponse(response); - } catch (IOException e) { - handleException(new TransportException(e)); - } - } - - @Override - public void handleException(TransportException exp) { - try { - channel.sendResponse(exp); - } catch (IOException e) { - logger.debug(() -> new ParameterizedMessage( - "failed to send failure {}", extraInfoOnError == null ? "" : "(" + extraInfoOnError + ")"), e); - } - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } -} diff --git a/server/src/main/java/org/elasticsearch/transport/TransportHandshaker.java b/server/src/main/java/org/elasticsearch/transport/TransportHandshaker.java index b77ae83f67cfc..55ce080563421 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportHandshaker.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportHandshaker.java @@ -187,11 +187,6 @@ static final class HandshakeRequest extends TransportRequest { } } - @Override - public void readFrom(StreamInput in) { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput streamOutput) throws IOException { super.writeTo(streamOutput); @@ -213,15 +208,10 @@ static final class HandshakeResponse extends TransportResponse { } private HandshakeResponse(StreamInput in) throws IOException { - super.readFrom(in); + super(in); responseVersion = Version.readVersion(in); } - @Override - public void readFrom(StreamInput in) { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { assert responseVersion != null; diff --git a/server/src/main/java/org/elasticsearch/transport/TransportInfo.java b/server/src/main/java/org/elasticsearch/transport/TransportInfo.java index f8a75db65f37e..b27bfe993257e 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportInfo.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportInfo.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.transport.BoundTransportAddress; -import org.elasticsearch.common.xcontent.ToXContent.Params; import org.elasticsearch.common.xcontent.ToXContentFragment; import 
org.elasticsearch.common.xcontent.XContentBuilder; @@ -43,13 +42,13 @@ public TransportInfo(BoundTransportAddress address, @Nullable Map 0) { profileAddresses = new HashMap<>(size); for (int i = 0; i < size; i++) { String key = in.readString(); - BoundTransportAddress value = BoundTransportAddress.readBoundTransportAddress(in); + BoundTransportAddress value = new BoundTransportAddress(in); profileAddresses.put(key, value); } } diff --git a/server/src/main/java/org/elasticsearch/transport/TransportInterceptor.java b/server/src/main/java/org/elasticsearch/transport/TransportInterceptor.java index cc90a6c6d2331..13c7669251da3 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportInterceptor.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportInterceptor.java @@ -20,8 +20,7 @@ package org.elasticsearch.transport; import org.elasticsearch.cluster.node.DiscoveryNode; - -import java.util.function.Supplier; +import org.elasticsearch.common.io.stream.Writeable.Reader; /** * This interface allows plugins to intercept requests on both the sender and the receiver side. @@ -29,8 +28,8 @@ public interface TransportInterceptor { /** * This is called for each handler that is registered via - * {@link TransportService#registerRequestHandler(String, Supplier, String, boolean, boolean, TransportRequestHandler)} or - * {@link TransportService#registerRequestHandler(String, Supplier, String, TransportRequestHandler)}. The returned handler is + * {@link TransportService#registerRequestHandler(String, String, boolean, boolean, Reader, TransportRequestHandler)} or + * {@link TransportService#registerRequestHandler(String, String, Reader, TransportRequestHandler)}. The returned handler is * used instead of the passed in handler. By default the provided handler is returned. 
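For readers following the registerRequestHandler signature change referenced in this javadoc: requests now supply a Writeable.Reader, typically a StreamInput constructor reference. A hypothetical sketch (the action name, request class, and handler body are illustrative, not from this PR):

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;

public class ExampleRegistration {

    // A request whose StreamInput constructor doubles as its Writeable.Reader.
    public static class ExampleRequest extends TransportRequest {
        public ExampleRequest() {}

        public ExampleRequest(StreamInput in) throws IOException {
            super(in);
        }
    }

    public static void register(TransportService transportService) {
        transportService.registerRequestHandler(
            "internal:example/ping",   // illustrative action name
            ThreadPool.Names.SAME,     // executor
            ExampleRequest::new,       // Writeable.Reader<ExampleRequest>
            (request, channel, task) -> channel.sendResponse(TransportResponse.Empty.INSTANCE));
    }
}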
*/ default TransportRequestHandler interceptHandler(String action, String executor, diff --git a/server/src/main/java/org/elasticsearch/transport/TransportLogger.java b/server/src/main/java/org/elasticsearch/transport/TransportLogger.java index ef72cb472301c..770554477b684 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportLogger.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportLogger.java @@ -18,12 +18,11 @@ */ package org.elasticsearch.transport; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.Compressor; -import org.elasticsearch.common.compress.CompressorFactory; import org.elasticsearch.common.compress.NotCompressedException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; @@ -88,11 +87,9 @@ private String format(TcpChannel channel, BytesReference message, String event) if (isRequest) { if (TransportStatus.isCompress(status)) { Compressor compressor; - try { - final int bytesConsumed = TcpHeader.REQUEST_ID_SIZE + TcpHeader.STATUS_SIZE + TcpHeader.VERSION_ID_SIZE; - compressor = CompressorFactory.compressor(message.slice(bytesConsumed, message.length() - bytesConsumed)); - } catch (NotCompressedException ex) { - throw new IllegalStateException(ex); + compressor = InboundMessage.getCompressor(message); + if (compressor == null) { + throw new IllegalStateException(new NotCompressedException()); } streamInput = compressor.streamInput(streamInput); } diff --git a/server/src/main/java/org/elasticsearch/transport/TransportMessage.java b/server/src/main/java/org/elasticsearch/transport/TransportMessage.java index 67b54ca58b75f..431308742022f 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportMessage.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportMessage.java @@ -20,13 +20,10 @@ package org.elasticsearch.transport; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.transport.TransportAddress; -import java.io.IOException; - -public abstract class TransportMessage implements Streamable, Writeable { +public abstract class TransportMessage implements Writeable { private TransportAddress remoteAddress; @@ -41,18 +38,11 @@ public TransportAddress remoteAddress() { /** * Constructs a new empty transport message */ - public TransportMessage() { - } + public TransportMessage() {} /** * Constructs a new transport message with the data from the {@link StreamInput}. 
This is * currently a no-op */ - public TransportMessage(StreamInput in) throws IOException { - } - - @Override - public void readFrom(StreamInput in) throws IOException { - - } + public TransportMessage(StreamInput in) {} } diff --git a/server/src/main/java/org/elasticsearch/transport/TransportRequest.java b/server/src/main/java/org/elasticsearch/transport/TransportRequest.java index 866a34302c50a..5405869d03baa 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportRequest.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportRequest.java @@ -29,6 +29,12 @@ public abstract class TransportRequest extends TransportMessage implements TaskAwareRequest { public static class Empty extends TransportRequest { public static final Empty INSTANCE = new Empty(); + + public Empty() {} + + public Empty(StreamInput in) throws IOException { + super(in); + } } /** @@ -59,12 +65,6 @@ public TaskId getParentTask() { return parentTaskId; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - parentTaskId = TaskId.readFromStream(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { parentTaskId.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java index d0e2ed9853499..ad4ec5d84757b 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -33,7 +33,6 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.regex.Regex; @@ -175,9 +174,9 @@ public TransportService(Settings settings, Transport transport, ThreadPool threa } registerRequestHandler( HANDSHAKE_ACTION_NAME, - () -> HandshakeRequest.INSTANCE, ThreadPool.Names.SAME, false, false, + HandshakeRequest::new, (request, channel, task) -> channel.sendResponse( new HandshakeResponse(localNode, clusterName, localNode.getVersion()))); } @@ -301,6 +300,10 @@ public TransportStats stats() { return transport.getStats(); } + public boolean isTransportSecure() { + return transport.isSecure(); + } + public BoundTransportAddress boundAddress() { return transport.boundAddress(); } @@ -420,7 +423,8 @@ public void handshake( final Transport.Connection connection, final long handshakeTimeout, final ActionListener listener) { - handshake(connection, handshakeTimeout, clusterName::equals, ActionListener.map(listener, HandshakeResponse::getDiscoveryNode)); + handshake(connection, handshakeTimeout, clusterName.getEqualityPredicate(), + ActionListener.map(listener, HandshakeResponse::getDiscoveryNode)); } /** @@ -447,12 +451,12 @@ public void handshake( new ActionListener<>() { @Override public void onResponse(HandshakeResponse response) { - if (!clusterNamePredicate.test(response.clusterName)) { - listener.onFailure(new IllegalStateException("handshake failed, mismatched cluster name [" + - response.clusterName + "] - " + node.toString())); + if (clusterNamePredicate.test(response.clusterName) == false) { + listener.onFailure(new IllegalStateException("handshake with [" + node + "] failed: remote cluster name [" + + response.clusterName.value() + "] 
does not match " + clusterNamePredicate)); } else if (response.version.isCompatible(localNode.getVersion()) == false) { - listener.onFailure(new IllegalStateException("handshake failed, incompatible version [" + - response.version + "] - " + node)); + listener.onFailure(new IllegalStateException("handshake with [" + node + "] failed: remote node version [" + + response.version + "] is incompatible with local node version [" + localNode.getVersion() + "]")); } else { listener.onResponse(response); } @@ -475,6 +479,10 @@ static class HandshakeRequest extends TransportRequest { public static final HandshakeRequest INSTANCE = new HandshakeRequest(); + HandshakeRequest(StreamInput in) throws IOException { + super(in); + } + private HandshakeRequest() { } @@ -719,7 +727,7 @@ protected void doRun() throws Exception { } private void sendLocalRequest(long requestId, final String action, final TransportRequest request, TransportRequestOptions options) { - final DirectResponseChannel channel = new DirectResponseChannel(logger, localNode, action, requestId, this, threadPool); + final DirectResponseChannel channel = new DirectResponseChannel(localNode, action, requestId, this, threadPool); try { onRequestSent(localNode, requestId, action, request, options); onRequestReceived(requestId, action); @@ -827,23 +835,6 @@ public static boolean isValidActionName(String actionName) { return false; } - /** - * Registers a new request handler - * - * @param action The action the request handler is associated with - * @param requestFactory a callable to be used construct new instances for streaming - * @param executor The executor the request handling will be executed on - * @param handler The handler itself that implements the request handling - */ - public void registerRequestHandler(String action, Supplier requestFactory, - String executor, TransportRequestHandler handler) { - validateActionName(action); - handler = interceptor.interceptHandler(action, executor, false, handler); - RequestHandlerRegistry reg = new RequestHandlerRegistry<>( - action, Streamable.newWriteableReader(requestFactory), taskManager, handler, executor, false, true); - transport.registerRequestHandler(reg); - } - /** * Registers a new request handler * @@ -862,27 +853,6 @@ public void registerRequestHandler(String act transport.registerRequestHandler(reg); } - /** - * Registers a new request handler - * - * @param action The action the request handler is associated with - * @param request The request class that will be used to construct new instances for streaming - * @param executor The executor the request handling will be executed on - * @param forceExecution Force execution on the executor queue and never reject it - * @param canTripCircuitBreaker Check the request size and raise an exception in case the limit is breached. 
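Related to the handshake message rewrite above: the mismatch error now prints the predicate itself ("does not match " + clusterNamePredicate), which is why getRemoteClusterNamePredicate() earlier in this change returns an anonymous Predicate with a descriptive toString(). A reduced sketch of that pattern, with a hypothetical string-valued predicate:

import java.util.function.Predicate;

// Hypothetical reduction of the pattern: a predicate that can describe itself,
// so failure messages can embed what was expected rather than a bare "mismatch".
final class ExpectedNamePredicate implements Predicate<String> {

    private final String expected; // null means "accept any name"

    ExpectedNamePredicate(String expected) {
        this.expected = expected;
    }

    @Override
    public boolean test(String actual) {
        return expected == null || expected.equals(actual);
    }

    @Override
    public String toString() {
        return expected == null ? "any cluster name"
            : "expected remote cluster name [" + expected + "]";
    }
}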
- * @param handler The handler itself that implements the request handling - */ - public void registerRequestHandler(String action, Supplier request, - String executor, boolean forceExecution, - boolean canTripCircuitBreaker, - TransportRequestHandler handler) { - validateActionName(action); - handler = interceptor.interceptHandler(action, executor, forceExecution, handler); - RequestHandlerRegistry reg = new RequestHandlerRegistry<>( - action, Streamable.newWriteableReader(request), taskManager, handler, executor, forceExecution, canTripCircuitBreaker); - transport.registerRequestHandler(reg); - } - /** * Registers a new request handler * @@ -914,7 +884,8 @@ public void onRequestReceived(long requestId, String action) { try { blockIncomingRequestsLatch.await(); } catch (InterruptedException e) { - logger.trace("interrupted while waiting for incoming requests block to be removed"); + Thread.currentThread().interrupt(); + throw new IllegalStateException("interrupted while waiting for incoming requests block to be removed"); } if (tracerLog.isTraceEnabled() && shouldTraceAction(action)) { tracerLog.trace("[{}][{}] received request", requestId, action); @@ -1164,16 +1135,13 @@ void setTimeoutHandler(TimeoutHandler handler) { } static class DirectResponseChannel implements TransportChannel { - final Logger logger; final DiscoveryNode localNode; private final String action; private final long requestId; final TransportService service; final ThreadPool threadPool; - DirectResponseChannel(Logger logger, DiscoveryNode localNode, String action, long requestId, TransportService service, - ThreadPool threadPool) { - this.logger = logger; + DirectResponseChannel(DiscoveryNode localNode, String action, long requestId, TransportService service, ThreadPool threadPool) { this.localNode = localNode; this.action = action; this.requestId = requestId; diff --git a/server/src/main/java/org/elasticsearch/transport/TransportSettings.java b/server/src/main/java/org/elasticsearch/transport/TransportSettings.java index b89c06de84a19..2277b56997ecb 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportSettings.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportSettings.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.transport; -import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -115,14 +114,9 @@ public final class TransportSettings { listSetting("transport.tracer.include", emptyList(), Function.identity(), Setting.Property.Dynamic, Setting.Property.NodeScope); public static final Setting> TRACE_LOG_EXCLUDE_SETTING = listSetting("transport.tracer.exclude", - Arrays.asList("internal:coordination/fault_detection/*", TransportLivenessAction.NAME), + Arrays.asList("internal:coordination/fault_detection/*"), Function.identity(), Setting.Property.Dynamic, Setting.Property.NodeScope); private TransportSettings() { } - - private static Setting fallback(String key, Setting.AffixSetting affixSetting, String regex, String replacement) { - return "_na_".equals(key) ? 
affixSetting.getConcreteSettingForNamespace(key) - : affixSetting.getConcreteSetting(key.replaceAll(regex, replacement)); - } } diff --git a/server/src/test/java/org/elasticsearch/action/ActionModuleTests.java b/server/src/test/java/org/elasticsearch/action/ActionModuleTests.java index 979d0018d3c01..f1226e5b0de7f 100644 --- a/server/src/test/java/org/elasticsearch/action/ActionModuleTests.java +++ b/server/src/test/java/org/elasticsearch/action/ActionModuleTests.java @@ -88,14 +88,9 @@ protected FakeTransportAction(String actionName, ActionFilters actionFilters, Ta protected void doExecute(Task task, FakeRequest request, ActionListener listener) { } } - class FakeAction extends StreamableResponseActionType { + class FakeAction extends ActionType { protected FakeAction() { - super("fake"); - } - - @Override - public ActionResponse newResponse() { - return null; + super("fake", null); } } FakeAction action = new FakeAction(); diff --git a/server/src/test/java/org/elasticsearch/action/ActionTests.java b/server/src/test/java/org/elasticsearch/action/ActionTests.java index b9462ac68e749..aff2291994275 100644 --- a/server/src/test/java/org/elasticsearch/action/ActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/ActionTests.java @@ -24,14 +24,9 @@ public class ActionTests extends ESTestCase { public void testEquals() { - class FakeAction extends StreamableResponseActionType { + class FakeAction extends ActionType { protected FakeAction(String name) { - super(name); - } - - @Override - public ActionResponse newResponse() { - return null; + super(name, null); } } FakeAction fakeAction1 = new FakeAction("a"); diff --git a/server/src/test/java/org/elasticsearch/action/ShardOperationFailedExceptionTests.java b/server/src/test/java/org/elasticsearch/action/ShardOperationFailedExceptionTests.java index 1348445b62752..4639b00250623 100644 --- a/server/src/test/java/org/elasticsearch/action/ShardOperationFailedExceptionTests.java +++ b/server/src/test/java/org/elasticsearch/action/ShardOperationFailedExceptionTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.action; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.RestStatus; @@ -58,11 +57,6 @@ private static class Failure extends ShardOperationFailedException { super(index, shardId, reason, status, cause); } - @Override - public void readFrom(StreamInput in) throws IOException { - - } - @Override public void writeTo(StreamOutput out) throws IOException { diff --git a/server/src/test/java/org/elasticsearch/action/ShardValidateQueryRequestTests.java b/server/src/test/java/org/elasticsearch/action/ShardValidateQueryRequestTests.java index 5d7f903a4a3cd..f0311bf5764bb 100644 --- a/server/src/test/java/org/elasticsearch/action/ShardValidateQueryRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/ShardValidateQueryRequestTests.java @@ -61,8 +61,7 @@ public void testSerialize() throws IOException { new AliasFilter(QueryBuilders.termQuery("filter_field", "value"), new String[] {"alias0", "alias1"}), validateQueryRequest); request.writeTo(output); try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { - ShardValidateQueryRequest readRequest = new ShardValidateQueryRequest(); - readRequest.readFrom(in); + ShardValidateQueryRequest readRequest = new 
ShardValidateQueryRequest(in); assertEquals(request.filteringAliases(), readRequest.filteringAliases()); assertArrayEquals(request.types(), readRequest.types()); assertEquals(request.explain(), readRequest.explain()); diff --git a/server/src/test/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java b/server/src/test/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java index 3f9e258ffec1c..fbd3fe0432e6c 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java @@ -19,10 +19,13 @@ package org.elasticsearch.action.admin; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsResponse; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.common.settings.SecureSettings; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; @@ -42,50 +45,53 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.containsString; +@ESIntegTestCase.ClusterScope(minNumDataNodes = 2) public class ReloadSecureSettingsIT extends ESIntegTestCase { public void testMissingKeystoreFile() throws Exception { final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class); final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class) - .stream().findFirst().get(); + .stream().findFirst().get(); final Environment environment = internalCluster().getInstance(Environment.class); final AtomicReference reloadSettingsError = new AtomicReference<>(); // keystore file should be missing for this test case Files.deleteIfExists(KeyStoreWrapper.keystorePath(environment.configFile())); final int initialReloadCount = mockReloadablePlugin.getReloadCount(); final CountDownLatch latch = new CountDownLatch(1); - client().admin().cluster().prepareReloadSecureSettings().execute( - new ActionListener() { - @Override - public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { - try { - assertThat(nodesReloadResponse, notNullValue()); - final Map nodesMap = nodesReloadResponse.getNodesMap(); - assertThat(nodesMap.size(), equalTo(cluster().size())); - for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { - assertThat(nodeResponse.reloadException(), notNullValue()); - assertThat(nodeResponse.reloadException(), instanceOf(IllegalStateException.class)); - assertThat(nodeResponse.reloadException().getMessage(), containsString("Keystore is missing")); - } - } catch (final AssertionError e) { - reloadSettingsError.set(e); - } finally { - latch.countDown(); + final SecureString emptyPassword = randomBoolean() ? 
new SecureString(new char[0]) : null; + client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword(emptyPassword) + .setNodesIds(Strings.EMPTY_ARRAY).execute( + new ActionListener() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + try { + assertThat(nodesReloadResponse, notNullValue()); + final Map nodesMap = nodesReloadResponse.getNodesMap(); + assertThat(nodesMap.size(), equalTo(cluster().size())); + for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { + assertThat(nodeResponse.reloadException(), notNullValue()); + assertThat(nodeResponse.reloadException(), instanceOf(IllegalStateException.class)); + assertThat(nodeResponse.reloadException().getMessage(), containsString("Keystore is missing")); } - } - - @Override - public void onFailure(Exception e) { - reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + } catch (final AssertionError e) { + reloadSettingsError.set(e); + } finally { latch.countDown(); } - }); + } + + @Override + public void onFailure(Exception e) { + reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + latch.countDown(); + } + }); latch.await(); if (reloadSettingsError.get() != null) { throw reloadSettingsError.get(); @@ -97,7 +103,7 @@ public void onFailure(Exception e) { public void testInvalidKeystoreFile() throws Exception { final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class); final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class) - .stream().findFirst().get(); + .stream().findFirst().get(); final Environment environment = internalCluster().getInstance(Environment.class); final AtomicReference reloadSettingsError = new AtomicReference<>(); final int initialReloadCount = mockReloadablePlugin.getReloadCount(); @@ -109,35 +115,163 @@ public void testInvalidKeystoreFile() throws Exception { Files.copy(keystore, KeyStoreWrapper.keystorePath(environment.configFile()), StandardCopyOption.REPLACE_EXISTING); } final CountDownLatch latch = new CountDownLatch(1); - client().admin().cluster().prepareReloadSecureSettings().execute( - new ActionListener() { - @Override - public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { - try { - assertThat(nodesReloadResponse, notNullValue()); - final Map nodesMap = nodesReloadResponse.getNodesMap(); - assertThat(nodesMap.size(), equalTo(cluster().size())); - for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { - assertThat(nodeResponse.reloadException(), notNullValue()); - } - } catch (final AssertionError e) { - reloadSettingsError.set(e); - } finally { - latch.countDown(); + final SecureString emptyPassword = randomBoolean() ? 
new SecureString(new char[0]) : null; + client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword(emptyPassword) + .setNodesIds(Strings.EMPTY_ARRAY).execute( + new ActionListener() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + try { + assertThat(nodesReloadResponse, notNullValue()); + final Map nodesMap = nodesReloadResponse.getNodesMap(); + assertThat(nodesMap.size(), equalTo(cluster().size())); + for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { + assertThat(nodeResponse.reloadException(), notNullValue()); } + } catch (final AssertionError e) { + reloadSettingsError.set(e); + } finally { + latch.countDown(); + } + } + + @Override + public void onFailure(Exception e) { + reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + latch.countDown(); + } + }); + latch.await(); + if (reloadSettingsError.get() != null) { + throw reloadSettingsError.get(); + } + // in the invalid keystore format case no reload should be triggered + assertThat(mockReloadablePlugin.getReloadCount(), equalTo(initialReloadCount)); + } + + public void testReloadAllNodesWithPasswordWithoutTLSFails() throws Exception { + final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class); + final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class) + .stream().findFirst().get(); + final Environment environment = internalCluster().getInstance(Environment.class); + final AtomicReference reloadSettingsError = new AtomicReference<>(); + final int initialReloadCount = mockReloadablePlugin.getReloadCount(); + final char[] password = randomAlphaOfLength(12).toCharArray(); + writeEmptyKeystore(environment, password); + final CountDownLatch latch = new CountDownLatch(1); + client().admin() + .cluster() + .prepareReloadSecureSettings() + // No filter should try to hit all nodes + .setNodesIds(Strings.EMPTY_ARRAY) + .setSecureStorePassword(new SecureString(password)) + .execute(new ActionListener() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + reloadSettingsError.set(new AssertionError("Nodes request succeeded when it should have failed", null)); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + assertThat(e, instanceOf(ElasticsearchException.class)); + assertThat(e.getMessage(), + containsString("Secure settings cannot be updated cluster wide when TLS for the transport layer is not enabled")); + latch.countDown(); + } + }); + latch.await(); + if (reloadSettingsError.get() != null) { + throw reloadSettingsError.get(); + } + //no reload should be triggered + assertThat(mockReloadablePlugin.getReloadCount(), equalTo(initialReloadCount)); + } + + public void testReloadLocalNodeWithPasswordWithoutTLSSucceeds() throws Exception { + final Environment environment = internalCluster().getInstance(Environment.class); + final AtomicReference reloadSettingsError = new AtomicReference<>(); + final char[] password = randomAlphaOfLength(12).toCharArray(); + writeEmptyKeystore(environment, password); + final CountDownLatch latch = new CountDownLatch(1); + client().admin() + .cluster() + .prepareReloadSecureSettings() + .setNodesIds("_local") + .setSecureStorePassword(new SecureString(password)) + .execute(new ActionListener() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + try { + 
assertThat(nodesReloadResponse, notNullValue()); + final Map nodesMap = nodesReloadResponse.getNodesMap(); + assertThat(nodesMap.size(), equalTo(1)); + assertThat(nodesReloadResponse.getNodes().size(), equalTo(1)); + final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse = nodesReloadResponse.getNodes().get(0); + assertThat(nodeResponse.reloadException(), nullValue()); + } catch (final AssertionError e) { + reloadSettingsError.set(e); + } finally { + latch.countDown(); } + } + + @Override + public void onFailure(Exception e) { + reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + latch.countDown(); + } + }); + latch.await(); + if (reloadSettingsError.get() != null) { + throw reloadSettingsError.get(); + } + } - @Override - public void onFailure(Exception e) { - reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + public void testWrongKeystorePassword() throws Exception { + final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class); + final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class) + .stream().findFirst().get(); + final Environment environment = internalCluster().getInstance(Environment.class); + final AtomicReference reloadSettingsError = new AtomicReference<>(); + final int initialReloadCount = mockReloadablePlugin.getReloadCount(); + // "some" keystore should be present in this case + writeEmptyKeystore(environment, new char[0]); + final CountDownLatch latch = new CountDownLatch(1); + client().admin() + .cluster() + .prepareReloadSecureSettings() + .setNodesIds("_local") + .setSecureStorePassword(new SecureString(new char[]{'W', 'r', 'o', 'n', 'g'})) + .execute(new ActionListener() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + try { + assertThat(nodesReloadResponse, notNullValue()); + final Map nodesMap = nodesReloadResponse.getNodesMap(); + assertThat(nodesMap.size(), equalTo(1)); + for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { + assertThat(nodeResponse.reloadException(), notNullValue()); + assertThat(nodeResponse.reloadException(), instanceOf(SecurityException.class)); + } + } catch (final AssertionError e) { + reloadSettingsError.set(e); + } finally { latch.countDown(); } - }); + } + + @Override + public void onFailure(Exception e) { + reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + latch.countDown(); + } + }); latch.await(); if (reloadSettingsError.get() != null) { throw reloadSettingsError.get(); } - // in the invalid keystore format case no reload should be triggered + // in the wrong password case no reload should be triggered assertThat(mockReloadablePlugin.getReloadCount(), equalTo(initialReloadCount)); } @@ -145,12 +279,12 @@ public void testMisbehavingPlugin() throws Exception { final Environment environment = internalCluster().getInstance(Environment.class); final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class); final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class) - .stream().findFirst().get(); + .stream().findFirst().get(); // make plugins throw on reload for (final String nodeName : internalCluster().getNodeNames()) { internalCluster().getInstance(PluginsService.class, nodeName) - .filterPlugins(MisbehavingReloadablePlugin.class) - .stream().findFirst().get().setShouldThrow(true); + 
.filterPlugins(MisbehavingReloadablePlugin.class) + .stream().findFirst().get().setShouldThrow(true); } final AtomicReference reloadSettingsError = new AtomicReference<>(); final int initialReloadCount = mockReloadablePlugin.getReloadCount(); @@ -158,34 +292,36 @@ public void testMisbehavingPlugin() throws Exception { final SecureSettings secureSettings = writeEmptyKeystore(environment, new char[0]); // read seed setting value from the test case (not from the node) final String seedValue = KeyStoreWrapper.SEED_SETTING - .get(Settings.builder().put(environment.settings()).setSecureSettings(secureSettings).build()) - .toString(); + .get(Settings.builder().put(environment.settings()).setSecureSettings(secureSettings).build()) + .toString(); final CountDownLatch latch = new CountDownLatch(1); - client().admin().cluster().prepareReloadSecureSettings().execute( - new ActionListener() { - @Override - public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { - try { - assertThat(nodesReloadResponse, notNullValue()); - final Map nodesMap = nodesReloadResponse.getNodesMap(); - assertThat(nodesMap.size(), equalTo(cluster().size())); - for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { - assertThat(nodeResponse.reloadException(), notNullValue()); - assertThat(nodeResponse.reloadException().getMessage(), containsString("If shouldThrow I throw")); - } - } catch (final AssertionError e) { - reloadSettingsError.set(e); - } finally { - latch.countDown(); + final SecureString emptyPassword = randomBoolean() ? new SecureString(new char[0]) : null; + client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword(emptyPassword) + .setNodesIds(Strings.EMPTY_ARRAY).execute( + new ActionListener() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + try { + assertThat(nodesReloadResponse, notNullValue()); + final Map nodesMap = nodesReloadResponse.getNodesMap(); + assertThat(nodesMap.size(), equalTo(cluster().size())); + for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { + assertThat(nodeResponse.reloadException(), notNullValue()); + assertThat(nodeResponse.reloadException().getMessage(), containsString("If shouldThrow I throw")); } - } - - @Override - public void onFailure(Exception e) { - reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + } catch (final AssertionError e) { + reloadSettingsError.set(e); + } finally { latch.countDown(); } - }); + } + + @Override + public void onFailure(Exception e) { + reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + latch.countDown(); + } + }); latch.await(); if (reloadSettingsError.get() != null) { throw reloadSettingsError.get(); @@ -200,7 +336,7 @@ public void onFailure(Exception e) { public void testReloadWhileKeystoreChanged() throws Exception { final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class); final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class) - .stream().findFirst().get(); + .stream().findFirst().get(); final Environment environment = internalCluster().getInstance(Environment.class); final int initialReloadCount = mockReloadablePlugin.getReloadCount(); for (int i = 0; i < randomIntBetween(4, 8); i++) { @@ -208,8 +344,8 @@ public void testReloadWhileKeystoreChanged() throws Exception { final SecureSettings secureSettings = 
writeEmptyKeystore(environment, new char[0]); // read seed setting value from the test case (not from the node) final String seedValue = KeyStoreWrapper.SEED_SETTING - .get(Settings.builder().put(environment.settings()).setSecureSettings(secureSettings).build()) - .toString(); + .get(Settings.builder().put(environment.settings()).setSecureSettings(secureSettings).build()) + .toString(); // reload call successfulReloadCall(); assertThat(mockReloadablePlugin.getSeedValue(), equalTo(seedValue)); @@ -228,30 +364,32 @@ protected Collection> nodePlugins() { private void successfulReloadCall() throws InterruptedException { final AtomicReference reloadSettingsError = new AtomicReference<>(); final CountDownLatch latch = new CountDownLatch(1); - client().admin().cluster().prepareReloadSecureSettings().execute( - new ActionListener() { - @Override - public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { - try { - assertThat(nodesReloadResponse, notNullValue()); - final Map nodesMap = nodesReloadResponse.getNodesMap(); - assertThat(nodesMap.size(), equalTo(cluster().size())); - for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { - assertThat(nodeResponse.reloadException(), nullValue()); - } - } catch (final AssertionError e) { - reloadSettingsError.set(e); - } finally { - latch.countDown(); + final SecureString emptyPassword = randomBoolean() ? new SecureString(new char[0]) : null; + client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword(emptyPassword) + .setNodesIds(Strings.EMPTY_ARRAY).execute( + new ActionListener() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + try { + assertThat(nodesReloadResponse, notNullValue()); + final Map nodesMap = nodesReloadResponse.getNodesMap(); + assertThat(nodesMap.size(), equalTo(cluster().size())); + for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { + assertThat(nodeResponse.reloadException(), nullValue()); } - } - - @Override - public void onFailure(Exception e) { - reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + } catch (final AssertionError e) { + reloadSettingsError.set(e); + } finally { latch.countDown(); } - }); + } + + @Override + public void onFailure(Exception e) { + reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + latch.countDown(); + } + }); latch.await(); if (reloadSettingsError.get() != null) { throw reloadSettingsError.get(); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponsesTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponsesTests.java index 012f801698f96..448ac1dde82d2 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponsesTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponsesTests.java @@ -28,12 +28,13 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.rest.RestStatus; -import 
org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.AbstractSerializingTestCase; import org.hamcrest.Matchers; import java.io.IOException; @@ -49,7 +50,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThanOrEqualTo; -public class ClusterHealthResponsesTests extends AbstractStreamableXContentTestCase { +public class ClusterHealthResponsesTests extends AbstractSerializingTestCase { private final ClusterHealthRequest.Level level = randomFrom(ClusterHealthRequest.Level.values()); public void testIsTimeout() { @@ -108,11 +109,6 @@ protected ClusterHealthResponse doParseInstance(XContentParser parser) { return ClusterHealthResponse.fromXContent(parser); } - @Override - protected ClusterHealthResponse createBlankInstance() { - return new ClusterHealthResponse(); - } - @Override protected ClusterHealthResponse createTestInstance() { int indicesSize = randomInt(20); @@ -131,6 +127,11 @@ protected ClusterHealthResponse createTestInstance() { TimeValue.timeValueMillis(randomInt(10000)), randomBoolean(), stateHealth); } + @Override + protected Writeable.Reader instanceReader() { + return ClusterHealthResponse::new; + } + @Override protected ToXContent.Params getToXContentParams() { return new ToXContent.MapParams(Collections.singletonMap("level", level.name().toLowerCase(Locale.ROOT))); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java index a0aab680bc039..57395859c503f 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -58,7 +58,7 @@ public void testSerialization() throws IOException { try (BytesStreamOutput out = new BytesStreamOutput()) { nodeStats.writeTo(out); try (StreamInput in = out.bytes().streamInput()) { - NodeStats deserializedNodeStats = NodeStats.readNodeStats(in); + NodeStats deserializedNodeStats = new NodeStats(in); assertEquals(nodeStats.getNode(), deserializedNodeStats.getNode()); assertEquals(nodeStats.getTimestamp(), deserializedNodeStats.getTimestamp()); if (nodeStats.getOs() == null) { diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java index 0a1e5c512df88..49a4776a803af 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java @@ -65,14 +65,13 @@ public CancellableNodeRequest() { super(); } - public CancellableNodeRequest(CancellableNodesRequest request) { - requestName = request.requestName; + public CancellableNodeRequest(StreamInput in) throws IOException { + super(in); + requestName = in.readString(); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - requestName = in.readString(); + public CancellableNodeRequest(CancellableNodesRequest request) { + requestName = request.requestName; } @Override @@ -100,8 +99,9 @@ public boolean shouldCancelChildrenOnCancellation() { public static class CancellableNodesRequest extends BaseNodesRequest { private String requestName; - private CancellableNodesRequest() { - super(); + private 
CancellableNodesRequest(StreamInput in) throws IOException { + super(in); + requestName = in.readString(); } public CancellableNodesRequest(String requestName, String... nodesIds) { @@ -109,12 +109,6 @@ public CancellableNodesRequest(String requestName, String... nodesIds) { this.requestName = requestName; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - requestName = in.readString(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java index bfaac83d51913..eac851f3abedc 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java @@ -37,6 +37,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; @@ -61,7 +62,6 @@ import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.function.Function; -import java.util.function.Supplier; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; @@ -102,8 +102,8 @@ public final void shutdownTestNodes() throws Exception { static class NodeResponse extends BaseNodeResponse { - protected NodeResponse() { - super(); + protected NodeResponse(StreamInput in) throws IOException { + super(in); } protected NodeResponse(DiscoveryNode node) { @@ -119,12 +119,12 @@ protected NodesResponse(ClusterName clusterName, List nodes, List< @Override protected List readNodesFrom(StreamInput in) throws IOException { - return in.readStreamableList(NodeResponse::new); + return in.readList(NodeResponse::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeStreamableList(nodes); + out.writeList(nodes); } public int failureCount() { @@ -139,8 +139,8 @@ abstract class AbstractTestNodesAction { AbstractTestNodesAction(String actionName, ThreadPool threadPool, - ClusterService clusterService, TransportService transportService, Supplier request, - Supplier nodeRequest) { + ClusterService clusterService, TransportService transportService, Writeable.Reader request, + Writeable.Reader nodeRequest) { super(actionName, threadPool, clusterService, transportService, new ActionFilters(new HashSet<>()), request, nodeRequest, ThreadPool.Names.GENERIC, NodeResponse.class); @@ -152,8 +152,8 @@ protected NodesResponse newResponse(NodesRequest request, List res } @Override - protected NodeResponse newNodeResponse() { - return new NodeResponse(); + protected NodeResponse newNodeResponse(StreamInput in) throws IOException { + return new NodeResponse(in); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java index d9c142498c917..3c4b59ab78492 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java +++ 
b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java @@ -18,14 +18,13 @@ */ package org.elasticsearch.action.admin.cluster.node.tasks; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.IndicesRequest; -import org.elasticsearch.action.StreamableResponseActionType; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.nodes.BaseNodeRequest; @@ -128,8 +127,8 @@ public void unblock() { public static class NodeResponse extends BaseNodeResponse { - protected NodeResponse() { - super(); + public NodeResponse(StreamInput in) throws IOException { + super(in); } public NodeResponse(DiscoveryNode node) { @@ -139,8 +138,8 @@ public NodeResponse(DiscoveryNode node) { public static class NodesResponse extends BaseNodesResponse implements ToXContentFragment { - NodesResponse() { - + public NodesResponse(StreamInput in) throws IOException { + super(in); } public NodesResponse(ClusterName clusterName, List nodes, List failures) { @@ -149,12 +148,12 @@ public NodesResponse(ClusterName clusterName, List nodes, List readNodesFrom(StreamInput in) throws IOException { - return in.readStreamableList(NodeResponse::new); + return in.readList(NodeResponse::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeStreamableList(nodes); + out.writeList(nodes); } public int getFailureCount() { @@ -172,8 +171,10 @@ public static class NodeRequest extends BaseNodeRequest { protected String requestName; protected boolean shouldBlock; - public NodeRequest() { - super(); + public NodeRequest(StreamInput in) throws IOException { + super(in); + requestName = in.readString(); + shouldBlock = in.readBoolean(); } public NodeRequest(NodesRequest request, boolean shouldBlock) { @@ -181,13 +182,6 @@ public NodeRequest(NodesRequest request, boolean shouldBlock) { this.shouldBlock = shouldBlock; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - requestName = in.readString(); - shouldBlock = in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -212,8 +206,12 @@ public static class NodesRequest extends BaseNodesRequest { private boolean shouldBlock = true; private boolean shouldFail = false; - NodesRequest() { - super(); + NodesRequest(StreamInput in) throws IOException { + super(in); + requestName = in.readString(); + shouldStoreResult = in.readBoolean(); + shouldBlock = in.readBoolean(); + shouldFail = in.readBoolean(); } public NodesRequest(String requestName, String... 
nodesIds) { @@ -246,15 +244,6 @@ public boolean getShouldFail() { return shouldFail; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - requestName = in.readString(); - shouldStoreResult = in.readBoolean(); - shouldBlock = in.readBoolean(); - shouldFail = in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -302,8 +291,8 @@ protected NodeRequest newNodeRequest(NodesRequest request) { } @Override - protected NodeResponse newNodeResponse() { - return new NodeResponse(); + protected NodeResponse newNodeResponse(StreamInput in) throws IOException { + return new NodeResponse(in); } @Override @@ -331,18 +320,13 @@ protected NodeResponse nodeOperation(NodeRequest request, Task task) { } } - public static class TestTaskAction extends StreamableResponseActionType { + public static class TestTaskAction extends ActionType { public static final TestTaskAction INSTANCE = new TestTaskAction(); public static final String NAME = "cluster:admin/tasks/test"; private TestTaskAction() { - super(NAME); - } - - @Override - public NodesResponse newResponse() { - return new NodesResponse(); + super(NAME, NodesResponse::new); } } @@ -462,12 +446,7 @@ public static class UnblockTestTasksAction extends ActionType getResponseReader() { - return UnblockTestTasksResponse::new; + super(NAME, UnblockTestTasksResponse::new); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java index 6d8749ad4f4c8..f429c3bed8c48 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java @@ -80,20 +80,15 @@ public class TransportTasksActionTests extends TaskManagerTestCase { public static class NodeRequest extends BaseNodeRequest { protected String requestName; - public NodeRequest() { - super(); + public NodeRequest(StreamInput in) throws IOException { + super(in); + requestName = in.readString(); } public NodeRequest(NodesRequest request) { requestName = request.requestName; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - requestName = in.readString(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -114,8 +109,9 @@ public Task createTask(long id, String type, String action, TaskId parentTaskId, public static class NodesRequest extends BaseNodesRequest { private String requestName; - NodesRequest() { - super(); + NodesRequest(StreamInput in) throws IOException { + super(in); + requestName = in.readString(); } public NodesRequest(String requestName, String... nodesIds) { @@ -123,12 +119,6 @@ public NodesRequest(String requestName, String... 
nodesIds) { this.requestName = requestName; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - requestName = in.readString(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -161,8 +151,8 @@ protected NodeRequest newNodeRequest(NodesRequest request) { } @Override - protected NodeResponse newNodeResponse() { - return new NodeResponse(); + protected NodeResponse newNodeResponse(StreamInput in) throws IOException { + return new NodeResponse(in); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java index dfef08944de60..92c010f641782 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java @@ -165,9 +165,7 @@ private ClusterRerouteRequest roundTripThroughBytes(ClusterRerouteRequest origin try (BytesStreamOutput output = new BytesStreamOutput()) { original.writeTo(output); try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { - ClusterRerouteRequest copy = new ClusterRerouteRequest(); - copy.readFrom(in); - return copy; + return new ClusterRerouteRequest(in); } } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java index 853a991cf937f..5cc5f98a49282 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java @@ -68,8 +68,7 @@ public void testSerializeRequest() throws IOException { NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(NetworkModule.getNamedWriteables()); StreamInput wrap = new NamedWriteableAwareStreamInput(bytes.streamInput(), namedWriteableRegistry); - ClusterRerouteRequest deserializedReq = new ClusterRerouteRequest(); - deserializedReq.readFrom(wrap); + ClusterRerouteRequest deserializedReq = new ClusterRerouteRequest(wrap); assertEquals(req.isRetryFailed(), deserializedReq.isRetryFailed()); assertEquals(req.dryRun(), deserializedReq.dryRun()); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponseTests.java index dee5314b9672d..182ec61e4754b 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponseTests.java @@ -48,7 +48,7 @@ import static org.hamcrest.CoreMatchers.containsString; public class GetSnapshotsResponseTests extends ESTestCase { - // We can not subclass AbstractStreamableXContentTestCase because it + // We can not subclass AbstractSerializingTestCase because it // can only be used for instances with equals and hashCode // GetSnapshotResponse does not override equals and hashCode. 
// It does not override equals and hashCode, because it diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponseTests.java index 17d1ecafabdae..f51fc90ab11e2 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponseTests.java @@ -40,7 +40,7 @@ protected RestoreSnapshotResponse createTestInstance() { int successfulShards = randomIntBetween(0, totalShards); return new RestoreSnapshotResponse(new RestoreInfo(name, indices, totalShards, successfulShards)); } else { - return new RestoreSnapshotResponse(null); + return new RestoreSnapshotResponse((RestoreInfo) null); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponseTests.java index 92532b89a2f8f..7412727b42e49 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponseTests.java @@ -24,14 +24,10 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; -public class ClusterStateResponseTests extends AbstractStreamableTestCase { - - @Override - protected ClusterStateResponse createBlankInstance() { - return new ClusterStateResponse(); - } +public class ClusterStateResponseTests extends AbstractWireSerializingTestCase { @Override protected ClusterStateResponse createTestInstance() { @@ -48,6 +44,11 @@ protected ClusterStateResponse createTestInstance() { return new ClusterStateResponse(clusterName, clusterState, randomBoolean()); } + @Override + protected Writeable.Reader instanceReader() { + return ClusterStateResponse::new; + } + @Override protected NamedWriteableRegistry getNamedWriteableRegistry() { return new NamedWriteableRegistry(ClusterModule.getNamedWriteables()); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponseTests.java index 1c92c0c8c2bf7..8bea7fecc571a 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponseTests.java @@ -17,18 +17,19 @@ * under the License. 
*/ +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.script.Script; import org.elasticsearch.script.StoredScriptSource; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.AbstractSerializingTestCase; import java.io.IOException; import java.util.Collections; import java.util.Map; import java.util.function.Predicate; -public class GetStoredScriptResponseTests extends AbstractStreamableXContentTestCase { +public class GetStoredScriptResponseTests extends AbstractSerializingTestCase { @Override protected GetStoredScriptResponse doParseInstance(XContentParser parser) throws IOException { @@ -36,13 +37,13 @@ protected GetStoredScriptResponse doParseInstance(XContentParser parser) throws } @Override - protected GetStoredScriptResponse createBlankInstance() { - return new GetStoredScriptResponse(); + protected GetStoredScriptResponse createTestInstance() { + return new GetStoredScriptResponse(randomAlphaOfLengthBetween(1, 10), randomScriptSource()); } @Override - protected GetStoredScriptResponse createTestInstance() { - return new GetStoredScriptResponse(randomAlphaOfLengthBetween(1, 10), randomScriptSource()); + protected Writeable.Reader instanceReader() { + return GetStoredScriptResponse::new; } @Override diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestTests.java index 821c75c2ed7d3..2bd96045558bf 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestTests.java @@ -43,8 +43,7 @@ public void testSerialization() throws IOException { storedScriptRequest.writeTo(output); try (StreamInput in = output.bytes().streamInput()) { - PutStoredScriptRequest serialized = new PutStoredScriptRequest(); - serialized.readFrom(in); + PutStoredScriptRequest serialized = new PutStoredScriptRequest(in); assertEquals(XContentType.JSON, serialized.xContentType()); assertEquals(storedScriptRequest.id(), serialized.id()); assertEquals(storedScriptRequest.context(), serialized.context()); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java index 2aaf2507e3ba7..d3c4db8d5ada8 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java @@ -19,8 +19,10 @@ package org.elasticsearch.action.admin.cluster.tasks; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.ESIntegTestCase.ClusterScope; +import org.elasticsearch.test.InternalTestCluster; import java.util.Arrays; @@ -30,9 +32,9 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY_ALLOW_DELETE; -@ClusterScope(scope = ESIntegTestCase.Scope.TEST) public class PendingTasksBlocksIT extends ESIntegTestCase { - public void 
testPendingTasksWithBlocks() { + + public void testPendingTasksWithIndexBlocks() { createIndex("test"); ensureGreen("test"); @@ -41,19 +43,56 @@ public void testPendingTasksWithBlocks() { SETTING_READ_ONLY_ALLOW_DELETE)) { try { enableIndexBlock("test", blockSetting); - PendingClusterTasksResponse response = client().admin().cluster().preparePendingClusterTasks().execute().actionGet(); + PendingClusterTasksResponse response = client().admin().cluster().preparePendingClusterTasks().get(); assertNotNull(response.getPendingTasks()); } finally { disableIndexBlock("test", blockSetting); } } + } + + public void testPendingTasksWithClusterReadOnlyBlock() { + if (randomBoolean()) { + createIndex("test"); + ensureGreen("test"); + } try { setClusterReadOnly(true); - PendingClusterTasksResponse response = client().admin().cluster().preparePendingClusterTasks().execute().actionGet(); + PendingClusterTasksResponse response = client().admin().cluster().preparePendingClusterTasks().get(); assertNotNull(response.getPendingTasks()); } finally { setClusterReadOnly(false); } } + + public void testPendingTasksWithClusterNotRecoveredBlock() throws Exception { + if (randomBoolean()) { + createIndex("test"); + ensureGreen("test"); + } + + // restart the cluster but prevent it from performing state recovery + final int nodeCount = client().admin().cluster().prepareNodesInfo("data:true", "master:true").get().getNodes().size(); + internalCluster().fullRestart(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) { + return Settings.builder() + .put(GatewayService.RECOVER_AFTER_NODES_SETTING.getKey(), nodeCount + 1) + .build(); + } + + @Override + public boolean validateClusterForming() { + return false; + } + }); + + assertNotNull(client().admin().cluster().preparePendingClusterTasks().get().getPendingTasks()); + + // starting one more node allows the cluster to recover + internalCluster().startNode(); + ensureGreen(); + } + } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponseTests.java index 6c15c419d092c..d3bf9d7397373 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponseTests.java @@ -22,7 +22,8 @@ import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.cluster.metadata.AliasMetaData.Builder; import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; import java.util.ArrayList; import java.util.HashSet; @@ -30,7 +31,7 @@ import java.util.List; import java.util.Set; -public class GetAliasesResponseTests extends AbstractStreamableTestCase { +public class GetAliasesResponseTests extends AbstractWireSerializingTestCase { @Override protected GetAliasesResponse createTestInstance() { @@ -38,8 +39,8 @@ protected GetAliasesResponse createTestInstance() { } @Override - protected GetAliasesResponse createBlankInstance() { - return new GetAliasesResponse(); + protected Writeable.Reader instanceReader() { + return GetAliasesResponse::new; } @Override diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponseTests.java 
b/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponseTests.java index 95fc010f37f86..71c74dcb3da01 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponseTests.java @@ -19,20 +19,25 @@ package org.elasticsearch.action.admin.indices.analyze; +import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction.AnalyzeToken; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.test.RandomObjects; import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Map; import static org.hamcrest.Matchers.equalTo; -public class AnalyzeResponseTests extends ESTestCase { +public class AnalyzeResponseTests extends AbstractWireSerializingTestCase { @SuppressWarnings("unchecked") public void testNullResponseToXContent() throws IOException { @@ -59,6 +64,64 @@ public void testNullResponseToXContent() throws IOException { assertThat(nullTokens.size(), equalTo(0)); assertThat(name, equalTo(nameValue)); } + } + + public void testConstructorArgs() { + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> new AnalyzeAction.Response(null, null)); + assertEquals("Neither token nor detail set on AnalysisAction.Response", ex.getMessage()); + } + @Override + protected AnalyzeAction.Response createTestInstance() { + int tokenCount = randomIntBetween(0, 30); + AnalyzeAction.AnalyzeToken[] tokens = new AnalyzeAction.AnalyzeToken[tokenCount]; + for (int i = 0; i < tokenCount; i++) { + tokens[i] = RandomObjects.randomToken(random()); + } + if (randomBoolean()) { + AnalyzeAction.CharFilteredText[] charfilters = null; + AnalyzeAction.AnalyzeTokenList[] tokenfilters = null; + if (randomBoolean()) { + charfilters = new AnalyzeAction.CharFilteredText[]{ + new AnalyzeAction.CharFilteredText("my_charfilter", new String[]{"one two"}) + }; + } + if (randomBoolean()) { + tokenfilters = new AnalyzeAction.AnalyzeTokenList[]{ + new AnalyzeAction.AnalyzeTokenList("my_tokenfilter_1", tokens), + new AnalyzeAction.AnalyzeTokenList("my_tokenfilter_2", tokens) + }; + } + AnalyzeAction.DetailAnalyzeResponse dar = new AnalyzeAction.DetailAnalyzeResponse( + charfilters, + new AnalyzeAction.AnalyzeTokenList("my_tokenizer", tokens), + tokenfilters); + return new AnalyzeAction.Response(null, dar); + } + return new AnalyzeAction.Response(Arrays.asList(tokens), null); } + + /** + * Either add a token to the token list or change the details token list name + */ + @Override + protected AnalyzeAction.Response mutateInstance(AnalyzeAction.Response instance) throws IOException { + if (instance.getTokens() != null) { + List extendedList = new ArrayList<>(instance.getTokens()); + extendedList.add(RandomObjects.randomToken(random())); + return new AnalyzeAction.Response(extendedList, null); + } else { + AnalyzeToken[] tokens = instance.detail().tokenizer().getTokens(); + return new AnalyzeAction.Response(null, new AnalyzeAction.DetailAnalyzeResponse( + instance.detail().charfilters(), 
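+ // only the tokenizer section is renamed ("my_other_tokenizer"); charfilters and tokenfilters carry over unchanged, so the mutated instance differs in exactly one field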
+ new AnalyzeAction.AnalyzeTokenList("my_other_tokenizer", tokens), + instance.detail().tokenfilters())); + } + } + + @Override + protected Reader instanceReader() { + return AnalyzeAction.Response::new; + } + } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestTests.java index df940012bf24d..f6c500fe3f22b 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestTests.java @@ -35,9 +35,9 @@ public void testSerialization() throws Exception { try (BytesStreamOutput out = new BytesStreamOutput()) { request.writeTo(out); - final CloseIndexRequest deserializedRequest = new CloseIndexRequest(); + final CloseIndexRequest deserializedRequest; try (StreamInput in = out.bytes().streamInput()) { - deserializedRequest.readFrom(in); + deserializedRequest = new CloseIndexRequest(in); } assertEquals(request.timeout(), deserializedRequest.timeout()); assertEquals(request.masterNodeTimeout(), deserializedRequest.masterNodeTimeout()); @@ -74,11 +74,11 @@ public void testBwcSerialization() throws Exception { out.writeStringArray(sample.indices()); sample.indicesOptions().writeIndicesOptions(out); - final CloseIndexRequest deserializedRequest = new CloseIndexRequest(); + final CloseIndexRequest deserializedRequest; try (StreamInput in = out.bytes().streamInput()) { in.setVersion(VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), VersionUtils.getPreviousVersion(Version.V_7_2_0))); - deserializedRequest.readFrom(in); + deserializedRequest = new CloseIndexRequest(in); } assertEquals(sample.getParentTask(), deserializedRequest.getParentTask()); assertEquals(sample.masterNodeTimeout(), deserializedRequest.masterNodeTimeout()); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestTests.java index 1c27934927413..2dbcbf4aff62d 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestTests.java @@ -53,8 +53,7 @@ public void testSerialization() throws IOException { request.writeTo(output); try (StreamInput in = output.bytes().streamInput()) { - CreateIndexRequest serialized = new CreateIndexRequest(); - serialized.readFrom(in); + CreateIndexRequest serialized = new CreateIndexRequest(in); assertEquals(request.index(), serialized.index()); assertEquals(mapping, serialized.mappings().get("my_type")); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java index c7cc99d93e685..582ab09a1f868 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java @@ -558,7 +558,8 @@ public void testShrinkCommitsMergeOnIdle() throws Exception { } public void testShrinkThenSplitWithFailedNode() throws Exception { - internalCluster().ensureAtLeastNumDataNodes(3); + internalCluster().ensureAtLeastNumDataNodes(2); + final String shrinkNode = internalCluster().startDataOnlyNode(); final int shardCount 
= between(2, 5); prepareCreate("original").setSettings(Settings.builder().put(indexSettings()) @@ -566,8 +567,6 @@ public void testShrinkThenSplitWithFailedNode() throws Exception { .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, shardCount)).get(); client().admin().indices().prepareFlush("original").get(); ensureGreen(); - final String shrinkNode - = client().admin().cluster().prepareNodesInfo("data:true").clear().get().getNodes().get(0).getNode().getName(); client().admin().indices().prepareUpdateSettings("original") .setSettings(Settings.builder() .put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(), shrinkNode) diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushUnitTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushUnitTests.java index f6ca1c4f742a0..5fc8ce5fe3cfd 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushUnitTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushUnitTests.java @@ -85,13 +85,12 @@ public void testResponseStreaming() throws IOException { BytesStreamOutput out = new BytesStreamOutput(); testPlan.result.writeTo(out); StreamInput in = out.bytes().streamInput(); - SyncedFlushResponse readResponse = new SyncedFlushResponse(); - readResponse.readFrom(in); + SyncedFlushResponse readResponse = new SyncedFlushResponse(in); assertThat(readResponse.totalShards(), equalTo(testPlan.totalCounts.total)); assertThat(readResponse.successfulShards(), equalTo(testPlan.totalCounts.successful)); assertThat(readResponse.failedShards(), equalTo(testPlan.totalCounts.failed)); assertThat(readResponse.restStatus(), equalTo(testPlan.totalCounts.failed > 0 ? 
RestStatus.CONFLICT : RestStatus.OK)); - assertThat(readResponse.shardsResultPerIndex.size(), equalTo(testPlan.result.getShardsResultPerIndex().size())); + assertThat(readResponse.getShardsResultPerIndex().size(), equalTo(testPlan.result.getShardsResultPerIndex().size())); for (Map.Entry> entry : readResponse.getShardsResultPerIndex().entrySet()) { List originalShardsResults = testPlan.result.getShardsResultPerIndex().get(entry.getKey()); assertNotNull(originalShardsResults); @@ -169,7 +168,7 @@ protected TestPlan createTestPlan() { shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse("copy failure " + shardId)); } else { successful++; - shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse()); + shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse((String) null)); } } shardsResults.add(new ShardsSyncedFlushResult(shardId, "_sync_id_" + shard, replicas + 1, shardResponses)); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexActionTests.java index 75dbc0783d6b1..0ff854af20c54 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexActionTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.action.support.replication.ClusterStateCreationUtils; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -82,7 +83,7 @@ public void tearDown() throws Exception { public void testIncludeDefaults() { GetIndexRequest defaultsRequest = new GetIndexRequest().indices(indexName).includeDefaults(true); - getIndexAction.execute(null, defaultsRequest, ActionListener.wrap( + ActionTestUtils.execute(getIndexAction, null, defaultsRequest, ActionListener.wrap( defaultsResponse -> assertNotNull( "index.refresh_interval should be set as we are including defaults", defaultsResponse.getSetting(indexName, "index.refresh_interval") @@ -94,7 +95,7 @@ public void testIncludeDefaults() { public void testDoNotIncludeDefaults() { GetIndexRequest noDefaultsRequest = new GetIndexRequest().indices(indexName); - getIndexAction.execute(null, noDefaultsRequest, ActionListener.wrap( + ActionTestUtils.execute(getIndexAction, null, noDefaultsRequest, ActionListener.wrap( noDefaultsResponse -> assertNull( "index.refresh_interval should be null as it was never set", noDefaultsResponse.getSetting(indexName, "index.refresh_interval") diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexResponseTests.java index c50249dc765d4..1c277545cbbce 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexResponseTests.java @@ -25,13 +25,14 @@ import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.io.stream.Writeable; import 
org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.RandomCreateIndexGenerator; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.AbstractSerializingTestCase; import java.io.IOException; import java.util.ArrayList; @@ -40,7 +41,7 @@ import java.util.List; import java.util.function.Predicate; -public class GetIndexResponseTests extends AbstractStreamableXContentTestCase { +public class GetIndexResponseTests extends AbstractSerializingTestCase { @Override protected GetIndexResponse doParseInstance(XContentParser parser) throws IOException { @@ -48,8 +49,8 @@ protected GetIndexResponse doParseInstance(XContentParser parser) throws IOExcep } @Override - protected GetIndexResponse createBlankInstance() { - return new GetIndexResponse(); + protected Writeable.Reader instanceReader() { + return GetIndexResponse::new; } @Override diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponseTests.java index 677a7b4b7eced..6586d8af0ae5c 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponseTests.java @@ -22,12 +22,13 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContent.Params; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.test.EqualsHashCodeTestUtils; import java.io.IOException; @@ -39,7 +40,7 @@ import java.util.Map; import java.util.Objects; -public class GetMappingsResponseTests extends AbstractStreamableXContentTestCase { +public class GetMappingsResponseTests extends AbstractSerializingTestCase { @Override protected boolean supportsUnknownFields() { @@ -57,8 +58,8 @@ protected GetMappingsResponse doParseInstance(XContentParser parser) throws IOEx } @Override - protected GetMappingsResponse createBlankInstance() { - return new GetMappingsResponse(); + protected Writeable.Reader instanceReader() { + return GetMappingsResponse::new; } private static GetMappingsResponse mutate(GetMappingsResponse original) throws IOException { diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java index b3e89e5054ff3..c866d7d279cde 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java @@ -124,8 +124,7 @@ public void testSerialize() throws Exception { originalRequest.writeTo(out); BytesReference bytes = 
out.bytes(); try (StreamInput in = new NamedWriteableAwareStreamInput(bytes.streamInput(), writeableRegistry)) { - RolloverRequest cloneRequest = new RolloverRequest(); - cloneRequest.readFrom(in); + RolloverRequest cloneRequest = new RolloverRequest(in); assertThat(cloneRequest.getNewIndexName(), equalTo(originalRequest.getNewIndexName())); assertThat(cloneRequest.getAlias(), equalTo(originalRequest.getAlias())); for (Map.Entry> entry : cloneRequest.getConditions().entrySet()) { diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsActionTests.java index d119dcb1135c4..db443cf97f126 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsActionTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.action.support.replication.ClusterStateCreationUtils; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -97,7 +98,7 @@ public void tearDown() throws Exception { public void testIncludeDefaults() { GetSettingsRequest noDefaultsRequest = new GetSettingsRequest().indices(indexName); - getSettingsAction.execute(null, noDefaultsRequest, ActionListener.wrap(noDefaultsResponse -> { + ActionTestUtils.execute(getSettingsAction, null, noDefaultsRequest, ActionListener.wrap(noDefaultsResponse -> { assertNull("index.refresh_interval should be null as it was never set", noDefaultsResponse.getSetting(indexName, "index.refresh_interval")); }, exception -> { @@ -106,7 +107,7 @@ public void testIncludeDefaults() { GetSettingsRequest defaultsRequest = new GetSettingsRequest().indices(indexName).includeDefaults(true); - getSettingsAction.execute(null, defaultsRequest, ActionListener.wrap(defaultsResponse -> { + ActionTestUtils.execute(getSettingsAction, null, defaultsRequest, ActionListener.wrap(defaultsResponse -> { assertNotNull("index.refresh_interval should be set as we are including defaults", defaultsResponse.getSetting(indexName, "index.refresh_interval")); }, exception -> { @@ -118,7 +119,7 @@ public void testIncludeDefaults() { public void testIncludeDefaultsWithFiltering() { GetSettingsRequest defaultsRequest = new GetSettingsRequest().indices(indexName).includeDefaults(true) .names("index.refresh_interval"); - getSettingsAction.execute(null, defaultsRequest, ActionListener.wrap(defaultsResponse -> { + ActionTestUtils.execute(getSettingsAction, null, defaultsRequest, ActionListener.wrap(defaultsResponse -> { assertNotNull("index.refresh_interval should be set as we are including defaults", defaultsResponse.getSetting(indexName, "index.refresh_interval")); assertNull("index.number_of_shards should be null as this query is filtered", diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponseTests.java index e936d64a55d6a..3dfb30bed1e84 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponseTests.java +++ 
b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponseTests.java @@ -20,11 +20,12 @@ package org.elasticsearch.action.admin.indices.settings.get; import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.RandomCreateIndexGenerator; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.AbstractSerializingTestCase; import java.io.IOException; import java.util.HashMap; @@ -32,12 +33,7 @@ import java.util.Set; import java.util.function.Predicate; -public class GetSettingsResponseTests extends AbstractStreamableXContentTestCase { - - @Override - protected GetSettingsResponse createBlankInstance() { - return new GetSettingsResponse(); - } +public class GetSettingsResponseTests extends AbstractSerializingTestCase { @Override protected GetSettingsResponse createTestInstance() { @@ -80,6 +76,11 @@ protected GetSettingsResponse createTestInstance() { return new GetSettingsResponse(immutableIndexToSettings, immutableIndexToDefaultSettings); } + @Override + protected Writeable.Reader instanceReader() { + return GetSettingsResponse::new; + } + @Override protected GetSettingsResponse doParseInstance(XContentParser parser) throws IOException { return GetSettingsResponse.fromXContent(parser); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestSerializationTests.java similarity index 94% rename from server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java rename to server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestSerializationTests.java index 5732630e20cef..db67bfb64926f 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestSerializationTests.java @@ -20,11 +20,12 @@ package org.elasticsearch.action.admin.indices.settings.put; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings.Builder; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.CollectionUtils; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; @@ -35,7 +36,7 @@ import java.util.StringJoiner; import java.util.function.Supplier; -public class UpdateSettingsRequestStreamableTests extends AbstractStreamableTestCase { +public class UpdateSettingsRequestSerializationTests extends AbstractWireSerializingTestCase { @Override protected UpdateSettingsRequest mutateInstance(UpdateSettingsRequest request) { @@ -60,8 +61,8 @@ protected UpdateSettingsRequest createTestInstance() { } @Override - protected UpdateSettingsRequest createBlankInstance() { - return new UpdateSettingsRequest(); + protected Writeable.Reader instanceReader() { + return 
UpdateSettingsRequest::new; } public static UpdateSettingsRequest createTestItem() { diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestTests.java index ff75dbecd520c..ef2b13fc6d087 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestTests.java @@ -32,7 +32,7 @@ public class UpdateSettingsRequestTests extends AbstractXContentTestCase getRandomFieldsExcludeFilter() { @Override protected void assertEqualInstances(UpdateSettingsRequest expectedInstance, UpdateSettingsRequest newInstance) { // here only the settings should be tested, as this test covers explicitly only the XContent parsing - // the rest of the request fields are tested by the StreamableTests + // the rest of the request fields are tested by the SerializingTests super.assertEqualInstances(new UpdateSettingsRequest(expectedInstance.settings()), new UpdateSettingsRequest(newInstance.settings())); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeActionTests.java index eaea5babe3b7b..49faaa1f0d25e 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeActionTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ESAllocationTestCase; import org.elasticsearch.cluster.EmptyClusterInfoService; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -33,7 +34,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingTable; -import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; @@ -114,8 +114,7 @@ public void testErrorCondition() { RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); // now we start the shard - routingTable = service.applyStartedShards(clusterState, - routingTable.index("source").shardsWithState(ShardRoutingState.INITIALIZING)).routingTable(); + routingTable = ESAllocationTestCase.startInitializingShardsAndReroute(service, clusterState, "source").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); TransportResizeAction.prepareCreateIndexRequest(new ResizeRequest("target", "source"), clusterState, @@ -133,8 +132,7 @@ public void testPassNumRoutingShards() { RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); // now we start the shard - routingTable = 
service.applyStartedShards(clusterState, - routingTable.index("source").shardsWithState(ShardRoutingState.INITIALIZING)).routingTable(); + routingTable = ESAllocationTestCase.startInitializingShardsAndReroute(service, clusterState, "source").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); ResizeRequest resizeRequest = new ResizeRequest("target", "source"); @@ -163,8 +161,7 @@ public void testPassNumRoutingShardsAndFail() { RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); // now we start the shard - routingTable = service.applyStartedShards(clusterState, - routingTable.index("source").shardsWithState(ShardRoutingState.INITIALIZING)).routingTable(); + routingTable = ESAllocationTestCase.startInitializingShardsAndReroute(service, clusterState, "source").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); ResizeRequest resizeRequest = new ResizeRequest("target", "source"); @@ -198,8 +195,7 @@ public void testShrinkIndexSettings() { RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); // now we start the shard - routingTable = service.applyStartedShards(clusterState, - routingTable.index(indexName).shardsWithState(ShardRoutingState.INITIALIZING)).routingTable(); + routingTable = ESAllocationTestCase.startInitializingShardsAndReroute(service, clusterState, indexName).routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); int numSourceShards = clusterState.metaData().index(indexName).getNumberOfShards(); DocsStats stats = new DocsStats(between(0, (IndexWriter.MAX_DOCS) / numSourceShards), between(1, 1000), between(1, 10000)); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponseTests.java index 99850699ec2be..002749294071d 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponseTests.java @@ -46,7 +46,7 @@ public class IndicesStatsResponseTests extends ESTestCase { public void testInvalidLevel() { - final IndicesStatsResponse response = new IndicesStatsResponse(); + final IndicesStatsResponse response = new IndicesStatsResponse(null, 0, 0, 0, null); final String level = randomAlphaOfLength(16); final ToXContent.Params params = new ToXContent.MapParams(Collections.singletonMap("level", level)); final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanationTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanationTests.java index db167e0c7669e..45f0711319a06 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanationTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanationTests.java @@ -19,12 +19,13 @@ package org.elasticsearch.action.admin.indices.validate.query; +import org.elasticsearch.common.io.stream.Writeable; import 
org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.AbstractSerializingTestCase; import java.io.IOException; -public class QueryExplanationTests extends AbstractStreamableXContentTestCase { +public class QueryExplanationTests extends AbstractSerializingTestCase { static QueryExplanation createRandomQueryExplanation(boolean isValid) { String index = "index_" + randomInt(1000); @@ -48,12 +49,12 @@ protected QueryExplanation doParseInstance(XContentParser parser) throws IOExcep } @Override - protected QueryExplanation createBlankInstance() { - return new QueryExplanation(); + protected QueryExplanation createTestInstance() { + return createRandomQueryExplanation(); } @Override - protected QueryExplanation createTestInstance() { - return createRandomQueryExplanation(); + protected Writeable.Reader instanceReader() { + return QueryExplanation::new; } } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/RetryTests.java b/server/src/test/java/org/elasticsearch/action/bulk/RetryTests.java index decee8ceab714..f5d881e2b04a6 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/RetryTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/RetryTests.java @@ -226,7 +226,7 @@ public void bulk(BulkRequest request, ActionListener listener) { } private BulkItemResponse successfulResponse() { - return new BulkItemResponse(1, OpType.DELETE, new DeleteResponse()); + return new BulkItemResponse(1, OpType.DELETE, new DeleteResponse(null, null, null, 0, 0, 0, false)); } private BulkItemResponse failedResponse() { diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java index 95afbbf54f573..1822ed75d6091 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -219,7 +220,7 @@ public void testIngestSkipped() throws Exception { IndexRequest indexRequest = new IndexRequest("index", "type", "id"); indexRequest.source(Collections.emptyMap()); bulkRequest.add(indexRequest); - action.execute(null, bulkRequest, ActionListener.wrap(response -> {}, exception -> { + ActionTestUtils.execute(action, null, bulkRequest, ActionListener.wrap(response -> {}, exception -> { throw new AssertionError(exception); })); assertTrue(action.isExecuted); @@ -229,7 +230,7 @@ public void testIngestSkipped() throws Exception { public void testSingleItemBulkActionIngestSkipped() throws Exception { IndexRequest indexRequest = new IndexRequest("index", "type", "id"); indexRequest.source(Collections.emptyMap()); - singleItemBulkWriteAction.execute(null, indexRequest, ActionListener.wrap(response -> {}, exception -> { + ActionTestUtils.execute(singleItemBulkWriteAction, null, indexRequest, ActionListener.wrap(response -> {}, exception -> { throw new AssertionError(exception); })); assertTrue(action.isExecuted); @@ -250,7 +251,7 @@ public void 
testIngestLocal() throws Exception { AtomicBoolean responseCalled = new AtomicBoolean(false); AtomicBoolean failureCalled = new AtomicBoolean(false); - action.execute(null, bulkRequest, ActionListener.wrap( + ActionTestUtils.execute(action, null, bulkRequest, ActionListener.wrap( response -> { BulkItemResponse itemResponse = response.iterator().next(); assertThat(itemResponse.getFailure().getMessage(), containsString("fake exception")); @@ -286,7 +287,7 @@ public void testSingleItemBulkActionIngestLocal() throws Exception { indexRequest.setPipeline("testpipeline"); AtomicBoolean responseCalled = new AtomicBoolean(false); AtomicBoolean failureCalled = new AtomicBoolean(false); - singleItemBulkWriteAction.execute(null, indexRequest, ActionListener.wrap( + ActionTestUtils.execute(singleItemBulkWriteAction, null, indexRequest, ActionListener.wrap( response -> { responseCalled.set(true); }, @@ -328,7 +329,7 @@ public void testIngestForward() throws Exception { e -> { throw new AssertionError(e); }); - action.execute(null, bulkRequest, listener); + ActionTestUtils.execute(action, null, bulkRequest, listener); // should not have executed ingest locally verify(ingestService, never()).executeBulkRequest(any(), any(), any(), any()); @@ -348,7 +349,7 @@ public void testIngestForward() throws Exception { // now make sure ingest nodes are rotated through with a subsequent request reset(transportService); - action.execute(null, bulkRequest, listener); + ActionTestUtils.execute(action, null, bulkRequest, listener); verify(transportService).sendRequest(node.capture(), eq(BulkAction.NAME), any(), remoteResponseHandler.capture()); if (usedNode1) { assertSame(remoteNode2, node.getValue()); @@ -372,7 +373,7 @@ public void testSingleItemBulkActionIngestForward() throws Exception { e -> { throw new AssertionError(e); }); - singleItemBulkWriteAction.execute(null, indexRequest, listener); + ActionTestUtils.execute(singleItemBulkWriteAction, null, indexRequest, listener); // should not have executed ingest locally verify(ingestService, never()).executeBulkRequest(any(), any(), any(), any()); @@ -395,7 +396,7 @@ public void testSingleItemBulkActionIngestForward() throws Exception { // now make sure ingest nodes are rotated through with a subsequent request reset(transportService); - singleItemBulkWriteAction.execute(null, indexRequest, listener); + ActionTestUtils.execute(singleItemBulkWriteAction, null, indexRequest, listener); verify(transportService).sendRequest(node.capture(), eq(BulkAction.NAME), any(), remoteResponseHandler.capture()); if (usedNode1) { assertSame(remoteNode2, node.getValue()); @@ -444,7 +445,7 @@ private void validatePipelineWithBulkUpsert(@Nullable String indexRequestIndexNa assertNull(indexRequest1.getPipeline()); assertNull(indexRequest2.getPipeline()); assertNull(indexRequest3.getPipeline()); - action.execute(null, bulkRequest, ActionListener.wrap( + ActionTestUtils.execute(action, null, bulkRequest, ActionListener.wrap( response -> { BulkItemResponse itemResponse = response.iterator().next(); assertThat(itemResponse.getFailure().getMessage(), containsString("fake exception")); @@ -485,7 +486,7 @@ public void testDoExecuteCalledTwiceCorrectly() throws Exception { AtomicBoolean failureCalled = new AtomicBoolean(false); action.needToCheck = true; action.indexCreated = false; - singleItemBulkWriteAction.execute(null, indexRequest, ActionListener.wrap( + ActionTestUtils.execute(singleItemBulkWriteAction, null, indexRequest, ActionListener.wrap( response -> responseCalled.set(true), e -> { 
assertThat(e, sameInstance(exception)); @@ -517,7 +518,7 @@ public void testNotFindDefaultPipelineFromTemplateMatches(){ indexRequest.source(Collections.emptyMap()); AtomicBoolean responseCalled = new AtomicBoolean(false); AtomicBoolean failureCalled = new AtomicBoolean(false); - singleItemBulkWriteAction.execute(null, indexRequest, ActionListener.wrap( + ActionTestUtils.execute(singleItemBulkWriteAction, null, indexRequest, ActionListener.wrap( response -> responseCalled.set(true), e -> { assertThat(e, sameInstance(exception)); @@ -553,7 +554,7 @@ public void testFindDefaultPipelineFromTemplateMatch(){ indexRequest.source(Collections.emptyMap()); AtomicBoolean responseCalled = new AtomicBoolean(false); AtomicBoolean failureCalled = new AtomicBoolean(false); - singleItemBulkWriteAction.execute(null, indexRequest, ActionListener.wrap( + ActionTestUtils.execute(singleItemBulkWriteAction, null, indexRequest, ActionListener.wrap( response -> responseCalled.set(true), e -> { assertThat(e, sameInstance(exception)); @@ -570,7 +571,7 @@ private void validateDefaultPipeline(IndexRequest indexRequest) { AtomicBoolean responseCalled = new AtomicBoolean(false); AtomicBoolean failureCalled = new AtomicBoolean(false); assertNull(indexRequest.getPipeline()); - singleItemBulkWriteAction.execute(null, indexRequest, ActionListener.wrap( + ActionTestUtils.execute(singleItemBulkWriteAction, null, indexRequest, ActionListener.wrap( response -> { responseCalled.set(true); }, diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java index 65e4b1ee195ec..dfba68d364b01 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.cluster.service.ClusterService; @@ -101,7 +102,7 @@ public void tearDown() throws Exception { public void testDeleteNonExistingDocDoesNotCreateIndex() throws Exception { BulkRequest bulkRequest = new BulkRequest().add(new DeleteRequest("index", "type", "id")); - bulkAction.execute(null, bulkRequest, ActionListener.wrap(response -> { + ActionTestUtils.execute(bulkAction, null, bulkRequest, ActionListener.wrap(response -> { assertFalse(bulkAction.indexCreated); BulkItemResponse[] bulkResponses = ((BulkResponse) response).getItems(); assertEquals(bulkResponses.length, 1); @@ -117,7 +118,7 @@ public void testDeleteNonExistingDocExternalVersionCreatesIndex() throws Excepti BulkRequest bulkRequest = new BulkRequest() .add(new DeleteRequest("index", "type", "id").versionType(VersionType.EXTERNAL).version(0)); - bulkAction.execute(null, bulkRequest, ActionListener.wrap(response -> { + ActionTestUtils.execute(bulkAction, null, bulkRequest, ActionListener.wrap(response -> { assertTrue(bulkAction.indexCreated); }, exception -> { throw new AssertionError(exception); @@ -128,7 +129,7 @@ public void testDeleteNonExistingDocExternalGteVersionCreatesIndex() throws Exce BulkRequest bulkRequest = new BulkRequest() .add(new DeleteRequest("index2", "type", 
"id").versionType(VersionType.EXTERNAL_GTE).version(0)); - bulkAction.execute(null, bulkRequest, ActionListener.wrap(response -> { + ActionTestUtils.execute(bulkAction, null, bulkRequest, ActionListener.wrap(response -> { assertTrue(bulkAction.indexCreated); }, exception -> { throw new AssertionError(exception); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java index 55078840153f2..943f937e93162 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java @@ -46,7 +46,6 @@ import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.MetadataFieldMapper; -import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardTestCase; import org.elasticsearch.index.shard.ShardId; @@ -438,8 +437,7 @@ public void testNoopUpdateRequest() throws Exception { .doc(Requests.INDEX_CONTENT_TYPE, "field", "value"); BulkItemRequest primaryRequest = new BulkItemRequest(0, writeRequest); - DocWriteResponse noopUpdateResponse = new UpdateResponse(shardId, "_doc", "id", 0, - DocWriteResponse.Result.NOOP); + DocWriteResponse noopUpdateResponse = new UpdateResponse(shardId, "_doc", "id", 0, 2, 1, DocWriteResponse.Result.NOOP); IndexShard shard = mock(IndexShard.class); @@ -471,7 +469,7 @@ public void testNoopUpdateRequest() throws Exception { equalTo(DocWriteResponse.Result.NOOP)); assertThat(bulkShardRequest.items().length, equalTo(1)); assertEquals(primaryRequest, bulkShardRequest.items()[0]); // check that bulk item was not mutated - assertThat(primaryResponse.getResponse().getSeqNo(), equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO)); + assertThat(primaryResponse.getResponse().getSeqNo(), equalTo(0L)); } public void testUpdateRequestWithFailure() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestTests.java index dbab3104d4494..18c09e75b35c5 100644 --- a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestTests.java @@ -21,15 +21,16 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.ArrayUtils; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.test.AbstractWireSerializingTestCase; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.function.Consumer; -public class FieldCapabilitiesRequestTests extends AbstractStreamableTestCase { +public class FieldCapabilitiesRequestTests extends AbstractWireSerializingTestCase { @Override protected FieldCapabilitiesRequest createTestInstance() { @@ -55,8 +56,8 @@ protected FieldCapabilitiesRequest createTestInstance() { } @Override - protected FieldCapabilitiesRequest createBlankInstance() { - return new FieldCapabilitiesRequest(); + protected Writeable.Reader instanceReader() { + return FieldCapabilitiesRequest::new; } @Override 
diff --git a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java index e75dede451b16..0c329bee8191a 100644 --- a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java @@ -19,7 +19,8 @@ package org.elasticsearch.action.fieldcaps; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; import java.util.ArrayList; import java.util.Collections; @@ -29,12 +30,7 @@ import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiLettersOfLength; - -public class FieldCapabilitiesResponseTests extends AbstractStreamableTestCase { - @Override - protected FieldCapabilitiesResponse createBlankInstance() { - return new FieldCapabilitiesResponse(); - } +public class FieldCapabilitiesResponseTests extends AbstractWireSerializingTestCase { @Override protected FieldCapabilitiesResponse createTestInstance() { @@ -47,6 +43,11 @@ protected FieldCapabilitiesResponse createTestInstance() { return new FieldCapabilitiesResponse(responses); } + @Override + protected Writeable.Reader instanceReader() { + return FieldCapabilitiesResponse::new; + } + private FieldCapabilitiesIndexResponse createRandomIndexResponse() { Map responses = new HashMap<>(); diff --git a/server/src/test/java/org/elasticsearch/action/fieldcaps/MergedFieldCapabilitiesResponseTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/MergedFieldCapabilitiesResponseTests.java index 45097af07ea11..656dd5458e809 100644 --- a/server/src/test/java/org/elasticsearch/action/fieldcaps/MergedFieldCapabilitiesResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/fieldcaps/MergedFieldCapabilitiesResponseTests.java @@ -20,12 +20,13 @@ package org.elasticsearch.action.fieldcaps; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.AbstractSerializingTestCase; import java.io.IOException; import java.util.Collections; @@ -33,18 +34,13 @@ import java.util.Map; import java.util.function.Predicate; -public class MergedFieldCapabilitiesResponseTests extends AbstractStreamableXContentTestCase { +public class MergedFieldCapabilitiesResponseTests extends AbstractSerializingTestCase { @Override protected FieldCapabilitiesResponse doParseInstance(XContentParser parser) throws IOException { return FieldCapabilitiesResponse.fromXContent(parser); } - @Override - protected FieldCapabilitiesResponse createBlankInstance() { - return new FieldCapabilitiesResponse(); - } - @Override protected FieldCapabilitiesResponse createTestInstance() { // merged responses @@ -71,6 +67,11 @@ protected FieldCapabilitiesResponse createTestInstance() { return new FieldCapabilitiesResponse(indices, responses); } + @Override + protected Writeable.Reader instanceReader() { + return FieldCapabilitiesResponse::new; + } + @Override protected 
FieldCapabilitiesResponse mutateInstance(FieldCapabilitiesResponse response) { Map> mutatedResponses = new HashMap<>(response.get()); diff --git a/server/src/test/java/org/elasticsearch/action/get/TransportMultiGetActionTests.java b/server/src/test/java/org/elasticsearch/action/get/TransportMultiGetActionTests.java index 69a65a361f901..324880fa1bf39 100644 --- a/server/src/test/java/org/elasticsearch/action/get/TransportMultiGetActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/get/TransportMultiGetActionTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -183,7 +184,7 @@ protected void executeShardAction(final ActionListener listene } }; - transportAction.execute(task, request.request(), new ActionListenerAdapter()); + ActionTestUtils.execute(transportAction, task, request.request(), new ActionListenerAdapter()); assertTrue(shardActionInvoked.get()); } @@ -210,7 +211,7 @@ protected void executeShardAction(final ActionListener listene } }; - transportAction.execute(task, request.request(), new ActionListenerAdapter()); + ActionTestUtils.execute(transportAction, task, request.request(), new ActionListenerAdapter()); assertTrue(shardActionInvoked.get()); } diff --git a/server/src/test/java/org/elasticsearch/action/ingest/GetPipelineResponseTests.java b/server/src/test/java/org/elasticsearch/action/ingest/GetPipelineResponseTests.java index 95424acce77d4..48d4d09400bf7 100644 --- a/server/src/test/java/org/elasticsearch/action/ingest/GetPipelineResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/ingest/GetPipelineResponseTests.java @@ -20,13 +20,14 @@ package org.elasticsearch.action.ingest; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.ingest.PipelineConfiguration; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.AbstractSerializingTestCase; import java.io.IOException; import java.io.UncheckedIOException; @@ -35,7 +36,7 @@ import java.util.List; import java.util.Map; -public class GetPipelineResponseTests extends AbstractStreamableXContentTestCase { +public class GetPipelineResponseTests extends AbstractSerializingTestCase { private XContentBuilder getRandomXContentBuilder() throws IOException { XContentType xContentType = randomFrom(XContentType.values()); @@ -99,11 +100,6 @@ protected GetPipelineResponse doParseInstance(XContentParser parser) throws IOEx return GetPipelineResponse.fromXContent(parser); } - @Override - protected GetPipelineResponse createBlankInstance() { - return new GetPipelineResponse(); - } - @Override protected GetPipelineResponse createTestInstance() { try { @@ -113,6 +109,11 @@ protected 
GetPipelineResponse createTestInstance() { } } + @Override + protected Writeable.Reader instanceReader() { + return GetPipelineResponse::new; + } + @Override protected boolean supportsUnknownFields() { return false; diff --git a/server/src/test/java/org/elasticsearch/action/ingest/PutPipelineRequestTests.java b/server/src/test/java/org/elasticsearch/action/ingest/PutPipelineRequestTests.java index 7f64b3fe585f9..a86ba97ebf5f1 100644 --- a/server/src/test/java/org/elasticsearch/action/ingest/PutPipelineRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/ingest/PutPipelineRequestTests.java @@ -42,8 +42,7 @@ public void testSerializationWithXContent() throws IOException { request.writeTo(output); StreamInput in = StreamInput.wrap(output.bytes().toBytesRef().bytes); - PutPipelineRequest serialized = new PutPipelineRequest(); - serialized.readFrom(in); + PutPipelineRequest serialized = new PutPipelineRequest(in); assertEquals(XContentType.JSON, serialized.getXContentType()); assertEquals("{}", serialized.getSource().utf8ToString()); } diff --git a/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineResponseTests.java b/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineResponseTests.java index 5b85761d44a9b..97306829e5a8d 100644 --- a/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineResponseTests.java @@ -47,8 +47,7 @@ public void testSerialization() throws IOException { BytesStreamOutput out = new BytesStreamOutput(); response.writeTo(out); StreamInput streamInput = out.bytes().streamInput(); - SimulatePipelineResponse otherResponse = new SimulatePipelineResponse(); - otherResponse.readFrom(streamInput); + SimulatePipelineResponse otherResponse = new SimulatePipelineResponse(streamInput); assertThat(otherResponse.getPipelineId(), equalTo(response.getPipelineId())); assertThat(otherResponse.getResults().size(), equalTo(response.getResults().size())); diff --git a/server/src/test/java/org/elasticsearch/action/main/MainResponseTests.java b/server/src/test/java/org/elasticsearch/action/main/MainResponseTests.java index 2ee9cb9e1397e..a22137a6b7fef 100644 --- a/server/src/test/java/org/elasticsearch/action/main/MainResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/main/MainResponseTests.java @@ -23,17 +23,18 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.test.VersionUtils; import java.io.IOException; import java.util.Date; -public class MainResponseTests extends AbstractStreamableXContentTestCase { +public class MainResponseTests extends AbstractSerializingTestCase { @Override protected MainResponse createTestInstance() { @@ -50,8 +51,8 @@ Build.Flavor.UNKNOWN, Build.Type.UNKNOWN, randomAlphaOfLength(8), date, randomBo } @Override - protected MainResponse createBlankInstance() { - return new MainResponse(); + protected Writeable.Reader instanceReader() { + return MainResponse::new; } @Override diff --git 
a/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java b/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java index 01f1109ef3bed..19b53e2f8d380 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java @@ -159,6 +159,11 @@ public void search(final SearchRequest request, final ActionListener DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), null, + Collections.emptySet()) { + @Override + public TaskManager getTaskManager() { + return taskManager; + } + }; + ClusterService clusterService = mock(ClusterService.class); + when(clusterService.state()).thenReturn(ClusterState.builder(new ClusterName("test")).build()); + + String localNodeId = randomAlphaOfLengthBetween(3, 10); + int numSearchRequests = randomIntBetween(1, 100); + MultiSearchRequest multiSearchRequest = new MultiSearchRequest(); + for (int i = 0; i < numSearchRequests; i++) { + multiSearchRequest.add(new SearchRequest()); + } + AtomicInteger counter = new AtomicInteger(0); + Task task = multiSearchRequest.createTask(randomLong(), "type", "action", null, Collections.emptyMap()); + NodeClient client = new NodeClient(settings, threadPool) { + @Override + public void search(final SearchRequest request, final ActionListener listener) { + assertEquals(task.getId(), request.getParentTask().getId()); + assertEquals(localNodeId, request.getParentTask().getNodeId()); + counter.incrementAndGet(); + listener.onResponse(SearchResponse.empty(() -> 1L, SearchResponse.Clusters.EMPTY)); + } + + @Override + public String getLocalNodeId() { + return localNodeId; + } + }; + TransportMultiSearchAction action = + new TransportMultiSearchAction(threadPool, actionFilters, transportService, clusterService, 10, System::nanoTime, client); + + PlainActionFuture future = newFuture(); + action.execute(task, multiSearchRequest, future); + future.get(); + assertEquals(numSearchRequests, counter.get()); + } finally { + assertTrue(ESTestCase.terminate(threadPool)); + } + } + + public void testBatchExecute() { // Initialize dependencies of TransportMultiSearchAction Settings settings = Settings.builder() .put("node.name", TransportMultiSearchActionTests.class.getSimpleName()) @@ -123,6 +181,11 @@ public void search(final SearchRequest request, final ActionListener future = PlainActionFuture.newFuture(); - transportAction.execute(new TestRequest(), future); + ActionTestUtils.execute(transportAction, null, new TestRequest(), future); try { assertThat(future.get(), notNullValue()); assertThat("shouldn't get here if an error is expected", errorExpected, equalTo(false)); @@ -172,7 +172,7 @@ protected void doExecute(Task task, TestRequest request, ActionListener failures = new CopyOnWriteArrayList<>(); - transportAction.execute(new TestRequest(), new LatchedActionListener<>(new ActionListener() { + ActionTestUtils.execute(transportAction, null, new TestRequest(), new LatchedActionListener<>(new ActionListener<>() { @Override public void onResponse(TestResponse testResponse) { responses.incrementAndGet(); diff --git a/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java b/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java index 8aca4d7105eb9..b6fbbc97faea5 100644 --- 
a/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java @@ -48,6 +48,7 @@ import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; @@ -74,7 +75,6 @@ import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; -import java.util.function.Supplier; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; @@ -96,16 +96,19 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase { private TestTransportBroadcastByNodeAction action; public static class Request extends BroadcastRequest { - public Request() { + + public Request(StreamInput in) throws IOException { + super(in); } - public Request(String[] indices) { + public Request(String... indices) { super(indices); } } public static class Response extends BroadcastResponse { - public Response() { + public Response(StreamInput in) throws IOException { + super(in); } public Response(int totalShards, int successfulShards, int failedShards, List shardFailures) { @@ -118,7 +121,7 @@ class TestTransportBroadcastByNodeAction private final Map shards = new HashMap<>(); TestTransportBroadcastByNodeAction(TransportService transportService, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, Supplier request, + IndexNameExpressionResolver indexNameExpressionResolver, Writeable.Reader request, String executor) { super("indices:admin/test", TransportBroadcastByNodeActionTests.this.clusterService, transportService, actionFilters, indexNameExpressionResolver, request, executor); @@ -138,9 +141,7 @@ protected Response newResponse(Request request, int totalShards, int successfulS @Override protected Request readRequestFrom(StreamInput in) throws IOException { - final Request request = new Request(); - request.readFrom(in); - return request; + return new Request(in); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java b/server/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java index 45d8f4c8c0bf0..96c272c44022a 100644 --- a/server/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java +++ b/server/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. */ + package org.elasticsearch.action.support.master; import org.elasticsearch.action.DocWriteResponse; @@ -26,7 +27,6 @@ import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.NetworkDisruption.NetworkDisconnect; import org.elasticsearch.test.disruption.NetworkDisruption.TwoPartitions; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import java.util.Arrays; @@ -54,7 +54,6 @@ protected Collection> nodePlugins() { * If the master node is being disrupted or if it cannot commit cluster state changes, it needs to retry within timeout limits. 
* This retry logic is implemented in TransportMasterNodeAction and tested by the following master failover scenario. */ - @TestLogging("_root:DEBUG") public void testMasterFailoverDuringIndexingWithMappingChanges() throws Throwable { logger.info("--> start 4 nodes, 3 master, 1 data"); @@ -129,4 +128,5 @@ public void run() { refresh(); assertThat(client().prepareSearch("myindex").get().getHits().getTotalHits().value, equalTo(10L)); } + } diff --git a/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java b/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java index 72d2a85c994d6..51ea048a96cc7 100644 --- a/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.ThreadedActionListener; import org.elasticsearch.action.support.replication.ClusterStateCreationUtils; @@ -125,6 +126,12 @@ void assertListenerThrows(String msg, ActionFuture listener, Class klass) } public static class Request extends MasterNodeRequest { + Request() {} + + Request(StreamInput in) throws IOException { + super(in); + } + @Override public ActionRequestValidationException validate() { return null; @@ -134,6 +141,13 @@ public ActionRequestValidationException validate() { class Response extends ActionResponse { private long identity = randomLong(); + Response() {} + + Response(StreamInput in) throws IOException { + super(in); + identity = in.readLong(); + } + @Override public boolean equals(Object o) { if (this == o) return true; @@ -151,19 +165,13 @@ public int hashCode() { public void writeTo(StreamOutput out) throws IOException { out.writeLong(identity); } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - identity = in.readLong(); - } } class Action extends TransportMasterNodeAction { Action(String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) { super(actionName, transportService, clusterService, threadPool, - new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(), Request::new); + new ActionFilters(new HashSet<>()), Request::new, new IndexNameExpressionResolver()); } @Override @@ -179,8 +187,8 @@ protected String executor() { } @Override - protected Response newResponse() { - return new Response(); + protected Response read(StreamInput in) throws IOException { + return new Response(in); } @Override @@ -205,7 +213,7 @@ public void testLocalOperationWithoutBlocks() throws ExecutionException, Interru setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes)); - new Action("internal:testAction", transportService, clusterService, threadPool) { + ActionTestUtils.execute(new Action("internal:testAction", transportService, clusterService, threadPool) { @Override protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) { if (masterOperationFailure) { @@ -214,7 +222,7 @@ protected void masterOperation(Task task, Request request, ClusterState state, A 
listener.onResponse(response); } } - }.execute(request, listener); + }, null, request, listener); assertTrue(listener.isDone()); if (masterOperationFailure) { @@ -242,13 +250,13 @@ public void testLocalOperationWithBlocks() throws ExecutionException, Interrupte .blocks(ClusterBlocks.builder().addGlobalBlock(block)).build(); setState(clusterService, stateWithBlock); - new Action("internal:testAction", transportService, clusterService, threadPool) { + ActionTestUtils.execute(new Action("internal:testAction", transportService, clusterService, threadPool) { @Override protected ClusterBlockException checkBlock(Request request, ClusterState state) { Set blocks = state.blocks().global(); return blocks.isEmpty() ? null : new ClusterBlockException(blocks); } - }.execute(request, listener); + }, null, request, listener); if (retryableBlock && unblockBeforeTimeout) { assertFalse(listener.isDone()); @@ -284,7 +292,7 @@ public void testCheckBlockThrowsException() throws InterruptedException { .blocks(ClusterBlocks.builder().addGlobalBlock(block)).build(); setState(clusterService, stateWithBlock); - new Action("internal:testAction", transportService, clusterService, threadPool) { + ActionTestUtils.execute(new Action("internal:testAction", transportService, clusterService, threadPool) { @Override protected ClusterBlockException checkBlock(Request request, ClusterState state) { Set blocks = state.blocks().global(); @@ -294,7 +302,7 @@ protected ClusterBlockException checkBlock(Request request, ClusterState state) return new ClusterBlockException(blocks); } - }.execute(request, listener); + }, null, request, listener); if (throwExceptionOnRetry == false) { assertListenerThrows("checkBlock has thrown exception", listener, RuntimeException.class); @@ -312,12 +320,12 @@ public void testForceLocalOperation() throws ExecutionException, InterruptedExce setState(clusterService, ClusterStateCreationUtils.state(localNode, randomFrom(localNode, remoteNode, null), allNodes)); - new Action("internal:testAction", transportService, clusterService, threadPool) { + ActionTestUtils.execute(new Action("internal:testAction", transportService, clusterService, threadPool) { @Override protected boolean localExecute(Request request) { return true; } - }.execute(request, listener); + }, null, request, listener); assertTrue(listener.isDone()); listener.get(); @@ -327,7 +335,7 @@ public void testMasterNotAvailable() throws ExecutionException, InterruptedExcep Request request = new Request().masterNodeTimeout(TimeValue.timeValueSeconds(0)); setState(clusterService, ClusterStateCreationUtils.state(localNode, null, allNodes)); PlainActionFuture listener = new PlainActionFuture<>(); - new Action("internal:testAction", transportService, clusterService, threadPool).execute(request, listener); + ActionTestUtils.execute(new Action("internal:testAction", transportService, clusterService, threadPool), null, request, listener); assertTrue(listener.isDone()); assertListenerThrows("MasterNotDiscoveredException should be thrown", listener, MasterNotDiscoveredException.class); } @@ -336,7 +344,7 @@ public void testMasterBecomesAvailable() throws ExecutionException, InterruptedE Request request = new Request(); setState(clusterService, ClusterStateCreationUtils.state(localNode, null, allNodes)); PlainActionFuture listener = new PlainActionFuture<>(); - new Action("internal:testAction", transportService, clusterService, threadPool).execute(request, listener); + ActionTestUtils.execute(new Action("internal:testAction", transportService, 
clusterService, threadPool), null, request, listener); assertFalse(listener.isDone()); setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes)); assertTrue(listener.isDone()); @@ -348,7 +356,7 @@ public void testDelegateToMaster() throws ExecutionException, InterruptedExcepti setState(clusterService, ClusterStateCreationUtils.state(localNode, remoteNode, allNodes)); PlainActionFuture listener = new PlainActionFuture<>(); - new Action("internal:testAction", transportService, clusterService, threadPool).execute(request, listener); + ActionTestUtils.execute(new Action("internal:testAction", transportService, clusterService, threadPool), null, request, listener); assertThat(transport.capturedRequests().length, equalTo(1)); CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[0]; @@ -371,7 +379,7 @@ public void testDelegateToFailingMaster() throws ExecutionException, Interrupted .version(randomIntBetween(0, 10))); // use a random base version so it can go down when simulating a restart. PlainActionFuture listener = new PlainActionFuture<>(); - new Action("internal:testAction", transportService, clusterService, threadPool).execute(request, listener); + ActionTestUtils.execute(new Action("internal:testAction", transportService, clusterService, threadPool), null, request, listener); CapturingTransport.CapturedRequest[] capturedRequests = transport.getCapturedRequestsAndClear(); assertThat(capturedRequests.length, equalTo(1)); @@ -444,7 +452,7 @@ public void testMasterFailoverAfterStepDown() throws ExecutionException, Interru setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes)); - new Action( "internal:testAction", transportService, clusterService, threadPool) { + ActionTestUtils.execute(new Action( "internal:testAction", transportService, clusterService, threadPool) { @Override protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) throws Exception { @@ -455,7 +463,7 @@ protected void masterOperation(Task task, Request request, ClusterState state, : new NotMasterException("Fake error"); listener.onFailure(failure); } - }.execute(request, listener); + }, null, request, listener); assertThat(transport.capturedRequests().length, equalTo(1)); CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[0]; diff --git a/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionUtils.java b/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionUtils.java index fa5b5fa33bff0..8e736a411cd4c 100644 --- a/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionUtils.java +++ b/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionUtils.java @@ -27,7 +27,7 @@ public class TransportMasterNodeActionUtils { /** * Allows to directly call - * {@link TransportMasterNodeAction#masterOperation(org.elasticsearch.tasks.Task, MasterNodeRequest, ClusterState, ActionListener)} + * {@link TransportMasterNodeAction#masterOperation(org.elasticsearch.tasks.Task,MasterNodeRequest, ClusterState, ActionListener)} * which is a protected method. 
*/ public static <Request extends MasterNodeRequest<Request>, Response extends ActionResponse> void runMasterOperation( diff --git a/server/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java b/server/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java index 59ed774dbb06b..1d57d6f7486ee 100644 --- a/server/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.CapturingTransport; @@ -57,6 +58,7 @@ import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; import static org.elasticsearch.test.ClusterServiceUtils.setState; +import static org.mockito.Mockito.mock; public class TransportNodesActionTests extends ESTestCase { @@ -245,8 +247,8 @@ private static class TestTransportNodesAction extends TransportNodesAction { TestTransportNodesAction(ThreadPool threadPool, ClusterService clusterService, TransportService - transportService, ActionFilters actionFilters, Supplier request, - Supplier nodeRequest, String nodeExecutor) { + transportService, ActionFilters actionFilters, Writeable.Reader request, + Writeable.Reader nodeRequest, String nodeExecutor) { super("indices:admin/test", threadPool, clusterService, transportService, actionFilters, request, nodeRequest, nodeExecutor, TestNodeResponse.class); } @@ -263,8 +265,8 @@ protected TestNodeRequest newNodeRequest(TestNodesRequest request) { } @Override - protected TestNodeResponse newNodeResponse() { - return new TestNodeResponse(); + protected TestNodeResponse newNodeResponse(StreamInput in) throws IOException { + return new TestNodeResponse(in); } @Override @@ -278,8 +280,8 @@ private static class DataNodesOnlyTransportNodesAction extends TestTransportNodesAction { DataNodesOnlyTransportNodesAction(ThreadPool threadPool, ClusterService clusterService, TransportService - transportService, ActionFilters actionFilters, Supplier request, - Supplier nodeRequest, String nodeExecutor) { + transportService, ActionFilters actionFilters, Writeable.Reader request, + Writeable.Reader nodeRequest, String nodeExecutor) { super(threadPool, clusterService, transportService, actionFilters, request, nodeRequest, nodeExecutor); } @@ -290,6 +292,9 @@ protected void resolveRequest(TestNodesRequest request, ClusterState clusterStat } private static class TestNodesRequest extends BaseNodesRequest { + TestNodesRequest(StreamInput in) throws IOException { + super(in); + } TestNodesRequest(String...
nodesIds) { super(nodesIds); } @@ -307,19 +312,39 @@ private static class TestNodesResponse extends BaseNodesResponse readNodesFrom(StreamInput in) throws IOException { - return in.readStreamableList(TestNodeResponse::new); + return in.readList(TestNodeResponse::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeStreamableList(nodes); + out.writeList(nodes); } } - private static class TestNodeRequest extends BaseNodeRequest { } + private static class TestNodeRequest extends BaseNodeRequest { + TestNodeRequest() {} + TestNodeRequest(StreamInput in) throws IOException { + super(in); + } + } - private static class TestNodeResponse extends BaseNodeResponse { } + private static class TestNodeResponse extends BaseNodeResponse { + TestNodeResponse() { + super(mock(DiscoveryNode.class)); + } + protected TestNodeResponse(StreamInput in) throws IOException { + super(in); + } + } + + private static class OtherNodeResponse extends BaseNodeResponse { + OtherNodeResponse() { + super(mock(DiscoveryNode.class)); + } - private static class OtherNodeResponse extends BaseNodeResponse { } + protected OtherNodeResponse(StreamInput in) throws IOException { + super(in); + } + } } diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java index 383b6ed304db0..f2f376de3ca72 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java @@ -103,7 +103,7 @@ threadPool, new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_R transportService.start(); transportService.acceptIncomingRequests(); broadcastReplicationAction = new TestBroadcastReplicationAction(clusterService, transportService, - new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(), null); + new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver()); } @Override @@ -125,7 +125,7 @@ public void testNotStartedPrimary() throws InterruptedException, ExecutionExcept randomBoolean() ? 
ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED, ShardRoutingState.UNASSIGNED)); logger.debug("--> using initial state:\n{}", clusterService.state()); PlainActionFuture response = PlainActionFuture.newFuture(); - broadcastReplicationAction.execute(new DummyBroadcastRequest(index), response); + ActionTestUtils.execute(broadcastReplicationAction, null, new DummyBroadcastRequest(index), response); for (Tuple> shardRequests : broadcastReplicationAction.capturedShardRequests) { if (randomBoolean()) { shardRequests.v2().onFailure(new NoShardAvailableActionException(shardRequests.v1())); @@ -145,7 +145,7 @@ public void testStartedPrimary() throws InterruptedException, ExecutionException ShardRoutingState.STARTED)); logger.debug("--> using initial state:\n{}", clusterService.state()); PlainActionFuture response = PlainActionFuture.newFuture(); - broadcastReplicationAction.execute(new DummyBroadcastRequest(index), response); + ActionTestUtils.execute(broadcastReplicationAction, null, new DummyBroadcastRequest(index), response); for (Tuple> shardRequests : broadcastReplicationAction.capturedShardRequests) { ReplicationResponse replicationResponse = new ReplicationResponse(); replicationResponse.setShardInfo(new ReplicationResponse.ShardInfo(1, 1)); @@ -161,7 +161,7 @@ public void testResultCombine() throws InterruptedException, ExecutionException, setState(clusterService, stateWithAssignedPrimariesAndOneReplica(index, numShards)); logger.debug("--> using initial state:\n{}", clusterService.state()); PlainActionFuture response = PlainActionFuture.newFuture(); - broadcastReplicationAction.execute(new DummyBroadcastRequest().indices(index), response); + ActionTestUtils.execute(broadcastReplicationAction, null, new DummyBroadcastRequest().indices(index), response); int succeeded = 0; int failed = 0; for (Tuple> shardRequests : broadcastReplicationAction.capturedShardRequests) { @@ -213,10 +213,9 @@ private class TestBroadcastReplicationAction extends TransportBroadcastReplicati ConcurrentCollections.newConcurrentSet(); TestBroadcastReplicationAction(ClusterService clusterService, TransportService transportService, - ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - TransportReplicationAction action) { + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { super("internal:test-broadcast-replication-action", DummyBroadcastRequest::new, clusterService, transportService, - actionFilters, indexNameExpressionResolver, action); + null, actionFilters, indexNameExpressionResolver, null); } @Override @@ -256,7 +255,7 @@ public BroadcastResponse executeAndAssertImmediateResponse( TransportBroadcastReplicationAction broadcastAction, DummyBroadcastRequest request) { PlainActionFuture response = PlainActionFuture.newFuture(); - broadcastAction.execute(request, response); + ActionTestUtils.execute(broadcastAction, null, request, response); return response.actionGet("5s"); } diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index 4459aa5556988..462bec274d78c 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -466,7 +466,7 @@ public void testNoRerouteOnStaleClusterState() { ShardRouting 
relocationTarget = clusterService.state().getRoutingTable().shardRoutingTable(shardId) .shardsWithState(ShardRoutingState.INITIALIZING).get(0); AllocationService allocationService = ESAllocationTestCase.createAllocationService(); - ClusterState updatedState = allocationService.applyStartedShards(state, Collections.singletonList(relocationTarget)); + ClusterState updatedState = ESAllocationTestCase.startShardsAndReroute(allocationService, state, relocationTarget); setState(clusterService, updatedState); logger.debug("--> relocation complete state:\n{}", clusterService.state()); @@ -1210,11 +1210,6 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void onRetry() { super.onRetry(); @@ -1228,6 +1223,10 @@ public String toString() { } static class TestResponse extends ReplicationResponse { + TestResponse(StreamInput in) throws IOException { + super(in); + } + TestResponse() { setShardInfo(new ShardInfo()); } @@ -1251,8 +1250,8 @@ private class TestAction extends TransportReplicationAction() { @Override public TransportReplicationAction.ReplicaResponse read(StreamInput in) throws IOException { - final TransportReplicationAction.ReplicaResponse replicaResponse - = new TransportReplicationAction.ReplicaResponse(); - replicaResponse.readFrom(in); - return replicaResponse; + return new TransportReplicationAction.ReplicaResponse(in); } @SuppressWarnings("unchecked") @@ -431,7 +428,7 @@ private abstract class TestAction extends TransportReplicationAction { - public Request() { + public Request() {} + + public Request(StreamInput in) throws IOException { + super(in); } } public static class Response extends ActionResponse { - public Response() { + public Response() {} + + public Response(StreamInput in) throws IOException { + super(in); } @Override @@ -94,7 +101,7 @@ class TestTransportInstanceSingleOperationAction extends TransportInstanceSingle TestTransportInstanceSingleOperationAction(String actionName, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - Supplier request) { + Writeable.Reader request) { super(actionName, THREAD_POOL, TransportInstanceSingleOperationActionTests.this.clusterService, transportService, actionFilters, indexNameExpressionResolver, request); } @@ -114,7 +121,7 @@ protected void shardOperation(Request request, ActionListener listener } @Override - protected Response newResponse() { + protected Response newResponse(StreamInput in) throws IOException { return new Response(); } diff --git a/server/src/test/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsActionTests.java b/server/src/test/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsActionTests.java index 6ea24123b4796..8541da4eb43d4 100644 --- a/server/src/test/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsActionTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.get.TransportMultiGetActionTests; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.ClusterName; 
import org.elasticsearch.cluster.ClusterState; @@ -185,7 +186,7 @@ protected void executeShardAction(final ActionListener } }; - transportAction.execute(task, request.request(), new ActionListenerAdapter()); + ActionTestUtils.execute(transportAction, task, request.request(), new ActionListenerAdapter()); assertTrue(shardActionInvoked.get()); } @@ -212,7 +213,7 @@ protected void executeShardAction(final ActionListener } }; - transportAction.execute(task, request.request(), new ActionListenerAdapter()); + ActionTestUtils.execute(transportAction, task, request.request(), new ActionListenerAdapter()); assertTrue(shardActionInvoked.get()); } diff --git a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java index 6549c3a8df5e1..8d1e785c4f43b 100644 --- a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java @@ -27,7 +27,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.document.DocumentField; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -361,7 +361,7 @@ public void testNowInScript() throws IOException { // We simulate that the document is not existing yet GetResult getResult = new GetResult("test", "type1", "2", UNASSIGNED_SEQ_NO, 0, 0, false, null, null, null); UpdateHelper.Result result = updateHelper.prepare(new ShardId("test", "_na_", 0), updateRequest, getResult, () -> nowInMillis); - Streamable action = result.action(); + Writeable action = result.action(); assertThat(action, instanceOf(IndexRequest.class)); IndexRequest indexAction = (IndexRequest) action; assertEquals(nowInMillis, indexAction.sourceAsMap().get("update_timestamp")); @@ -374,7 +374,7 @@ public void testNowInScript() throws IOException { // We simulate that the document is not existing yet GetResult getResult = new GetResult("test", "type1", "2", 0, 1, 0, true, new BytesArray("{}"), null, null); UpdateHelper.Result result = updateHelper.prepare(new ShardId("test", "_na_", 0), updateRequest, getResult, () -> 42L); - Streamable action = result.action(); + Writeable action = result.action(); assertThat(action, instanceOf(IndexRequest.class)); } } @@ -424,7 +424,7 @@ private void runTimeoutTest(final GetResult getResult, final UpdateRequest updat updateRequest, getResult, ESTestCase::randomNonNegativeLong); - final Streamable action = result.action(); + final Writeable action = result.action(); assertThat(action, instanceOf(ReplicationRequest.class)); final ReplicationRequest request = (ReplicationRequest) action; assertThat(request.timeout(), equalTo(updateRequest.timeout())); diff --git a/server/src/test/java/org/elasticsearch/action/update/UpdateResponseTests.java b/server/src/test/java/org/elasticsearch/action/update/UpdateResponseTests.java index babad0276917d..f904024e83f05 100644 --- a/server/src/test/java/org/elasticsearch/action/update/UpdateResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/update/UpdateResponseTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.get.GetResult; import 
org.elasticsearch.index.get.GetResultTests; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.RandomObjects; @@ -54,7 +55,7 @@ public class UpdateResponseTests extends ESTestCase { public void testToXContent() throws IOException { { - UpdateResponse updateResponse = new UpdateResponse(new ShardId("index", "index_uuid", 0), "type", "id", 0, NOT_FOUND); + UpdateResponse updateResponse = new UpdateResponse(new ShardId("index", "index_uuid", 0), "type", "id", -2, 0, 0, NOT_FOUND); String output = Strings.toString(updateResponse); assertEquals("{\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"id\",\"_version\":0,\"result\":\"not_found\"," + "\"_shards\":{\"total\":0,\"successful\":0,\"failed\":0}}", output); @@ -162,21 +163,21 @@ public static Tuple randomUpdateResponse(XConten // We also want small number values (randomNonNegativeLong() tend to generate high numbers) // in order to catch some conversion error that happen between int/long after parsing. - Long seqNo = randomFrom(randomNonNegativeLong(), (long) randomIntBetween(0, 10_000), null); - long primaryTerm = seqNo == null ? 0 : randomIntBetween(1, 16); + long seqNo = randomFrom(randomNonNegativeLong(), (long) randomIntBetween(0, 10_000), SequenceNumbers.UNASSIGNED_SEQ_NO); + long primaryTerm = seqNo == SequenceNumbers.UNASSIGNED_SEQ_NO ? SequenceNumbers.UNASSIGNED_PRIMARY_TERM : randomIntBetween(1, 16); ShardId actualShardId = new ShardId(index, indexUUid, shardId); ShardId expectedShardId = new ShardId(index, INDEX_UUID_NA_VALUE, -1); UpdateResponse actual, expected; - if (seqNo != null) { + if (seqNo != SequenceNumbers.UNASSIGNED_SEQ_NO) { Tuple shardInfos = RandomObjects.randomShardInfo(random()); actual = new UpdateResponse(shardInfos.v1(), actualShardId, type, id, seqNo, primaryTerm, version, result); expected = new UpdateResponse(shardInfos.v2(), expectedShardId, type, id, seqNo, primaryTerm, version, result); } else { - actual = new UpdateResponse(actualShardId, type, id, version, result); - expected = new UpdateResponse(expectedShardId, type, id, version, result); + actual = new UpdateResponse(actualShardId, type, id, seqNo, primaryTerm, version, result); + expected = new UpdateResponse(expectedShardId, type, id, seqNo, primaryTerm, version, result); } if (actualGetResult.isExists()) { diff --git a/server/src/test/java/org/elasticsearch/bootstrap/JavaVersionTests.java b/server/src/test/java/org/elasticsearch/bootstrap/JavaVersionTests.java index a6e74a4770635..7c65bd716af96 100644 --- a/server/src/test/java/org/elasticsearch/bootstrap/JavaVersionTests.java +++ b/server/src/test/java/org/elasticsearch/bootstrap/JavaVersionTests.java @@ -23,16 +23,47 @@ import java.util.List; +import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.is; public class JavaVersionTests extends ESTestCase { public void testParse() { JavaVersion javaVersion = JavaVersion.parse("1.7.0"); List version = javaVersion.getVersion(); - assertThat(3, is(version.size())); - assertThat(1, is(version.get(0))); - assertThat(7, is(version.get(1))); - assertThat(0, is(version.get(2))); + assertThat(version.size(), is(3)); + assertThat(version.get(0), is(1)); + assertThat(version.get(1), is(7)); + assertThat(version.get(2), is(0)); + + JavaVersion javaVersionEarlyAccess = JavaVersion.parse("14.0.1-ea"); + List version14 = javaVersionEarlyAccess.getVersion(); + assertThat(version14.size(), is(3)); + 
assertThat(version14.get(0), is(14)); + assertThat(version14.get(1), is(0)); + assertThat(version14.get(2), is(1)); + + JavaVersion javaVersionOtherPrePart = JavaVersion.parse("13.2.4-somethingElseHere"); + List version13 = javaVersionOtherPrePart.getVersion(); + assertThat(version13.size(), is(3)); + assertThat(version13.get(0), is(13)); + assertThat(version13.get(1), is(2)); + assertThat(version13.get(2), is(4)); + + JavaVersion javaVersionNumericPrePart = JavaVersion.parse("13.2.4-something124443"); + List version11 = javaVersionNumericPrePart.getVersion(); + assertThat(version11.size(), is(3)); + assertThat(version11.get(0), is(13)); + assertThat(version11.get(1), is(2)); + assertThat(version11.get(2), is(4)); + } + + public void testParseInvalidVersions() { + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> JavaVersion.parse("11.2-something-else")); + assertThat(e.getMessage(), equalTo("Java version string [11.2-something-else] could not be parsed.")); + final IllegalArgumentException e1 = expectThrows(IllegalArgumentException.class, () -> JavaVersion.parse("11.0.")); + assertThat(e1.getMessage(), equalTo("Java version string [11.0.] could not be parsed.")); + final IllegalArgumentException e2 = expectThrows(IllegalArgumentException.class, () -> JavaVersion.parse("11.a.3")); + assertThat(e2.getMessage(), equalTo("Java version string [11.a.3] could not be parsed.")); } public void testToString() { @@ -40,6 +71,12 @@ public void testToString() { assertThat(javaVersion170.toString(), is("1.7.0")); JavaVersion javaVersion9 = JavaVersion.parse("9"); assertThat(javaVersion9.toString(), is("9")); + JavaVersion javaVersion11 = JavaVersion.parse("11.0.1-something09random"); + assertThat(javaVersion11.toString(), is("11.0.1-something09random")); + JavaVersion javaVersion12 = JavaVersion.parse("12.2-2019"); + assertThat(javaVersion12.toString(), is("12.2-2019")); + JavaVersion javaVersion13ea = JavaVersion.parse("13.1-ea"); + assertThat(javaVersion13ea.toString(), is("13.1-ea")); } public void testCompare() { @@ -50,6 +87,15 @@ public void testCompare() { JavaVersion onePointSevenPointTwo = JavaVersion.parse("1.7.2"); JavaVersion onePointSevenPointOnePointOne = JavaVersion.parse("1.7.1.1"); JavaVersion onePointSevenPointTwoPointOne = JavaVersion.parse("1.7.2.1"); + JavaVersion thirteen = JavaVersion.parse("13"); + JavaVersion thirteenPointTwoPointOne = JavaVersion.parse("13.2.1"); + JavaVersion thirteenPointTwoPointOneTwoThousand = JavaVersion.parse("13.2.1-2000"); + JavaVersion thirteenPointTwoPointOneThreeThousand = JavaVersion.parse("13.2.1-3000"); + JavaVersion thirteenPointTwoPointOneA = JavaVersion.parse("13.2.1-aaa"); + JavaVersion thirteenPointTwoPointOneB = JavaVersion.parse("13.2.1-bbb"); + JavaVersion fourteen = JavaVersion.parse("14"); + JavaVersion fourteenPointTwoPointOne = JavaVersion.parse("14.2.1"); + JavaVersion fourteenPointTwoPointOneEarlyAccess = JavaVersion.parse("14.2.1-ea"); assertTrue(onePointSix.compareTo(onePointSeven) < 0); assertTrue(onePointSeven.compareTo(onePointSix) > 0); @@ -57,17 +103,27 @@ public void testCompare() { assertTrue(onePointSeven.compareTo(onePointSevenPointZero) == 0); assertTrue(onePointSevenPointOnePointOne.compareTo(onePointSevenPointOne) > 0); assertTrue(onePointSevenPointTwo.compareTo(onePointSevenPointTwoPointOne) < 0); + assertTrue(thirteen.compareTo(thirteenPointTwoPointOne) < 0); + assertTrue(thirteen.compareTo(fourteen) < 0); + 
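// The compareTo assertions below establish the ordering of the new pre-release
// qualifiers: 13.2.1-2000 < 13.2.1-3000 < 13.2.1-aaa < 13.2.1-bbb (numeric
// qualifiers sort before alphanumeric ones), and an early-access build sorts
// between the bare major and its full release: 14 < 14.2.1-ea < 14.2.1.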
assertTrue(thirteenPointTwoPointOneThreeThousand.compareTo(thirteenPointTwoPointOneTwoThousand) > 0); + assertTrue(thirteenPointTwoPointOneThreeThousand.compareTo(thirteenPointTwoPointOneThreeThousand) == 0); + assertTrue(thirteenPointTwoPointOneA.compareTo(thirteenPointTwoPointOneA) == 0); + assertTrue(thirteenPointTwoPointOneA.compareTo(thirteenPointTwoPointOneB) < 0); + assertTrue(thirteenPointTwoPointOneA.compareTo(thirteenPointTwoPointOneThreeThousand) > 0); + assertTrue(fourteenPointTwoPointOneEarlyAccess.compareTo(fourteenPointTwoPointOne) < 0); + assertTrue(fourteenPointTwoPointOneEarlyAccess.compareTo(fourteen) > 0); + } public void testValidVersions() { - String[] versions = new String[]{"1.7", "1.7.0", "0.1.7", "1.7.0.80"}; + String[] versions = new String[]{"1.7", "1.7.0", "0.1.7", "1.7.0.80", "12-ea", "13.0.2.3-ea", "14-something", "11.0.2-21002"}; for (String version : versions) { assertTrue(JavaVersion.isValid(version)); } } public void testInvalidVersions() { - String[] versions = new String[]{"", "1.7.0_80", "1.7."}; + String[] versions = new String[]{"", "1.7.0_80", "1.7.", "11.2-something-else"}; for (String version : versions) { assertFalse(JavaVersion.isValid(version)); } @@ -76,4 +132,4 @@ public void testInvalidVersions() { public void testJava8Compat() { assertEquals(JavaVersion.parse("1.8"), JavaVersion.parse("8")); } -} \ No newline at end of file +} diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterHealthIT.java b/server/src/test/java/org/elasticsearch/cluster/ClusterHealthIT.java index d0680e91b8b73..656961411ea5e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterHealthIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterHealthIT.java @@ -19,10 +19,12 @@ package org.elasticsearch.cluster; +import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESIntegTestCase; @@ -38,14 +40,16 @@ public class ClusterHealthIT extends ESIntegTestCase { public void testSimpleLocalHealth() { createIndex("test"); - ensureGreen(); // master should thing it's green now. + ensureGreen(); // master should think it's green now. 
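// The per-node loop below uses setLocal(true), so each node should answer the
// health request from its own applied cluster state rather than forwarding it
// to the master; asserting green on every node therefore checks that the state
// has actually propagated everywhere.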
- for (String node : internalCluster().getNodeNames()) { + for (final String node : internalCluster().getNodeNames()) { // a very high time out, which should never fire due to the local flag - ClusterHealthResponse health = client(node).admin().cluster().prepareHealth().setLocal(true) + logger.info("--> getting cluster health on [{}]", node); + final ClusterHealthResponse health = client(node).admin().cluster().prepareHealth().setLocal(true) .setWaitForEvents(Priority.LANGUID).setTimeout("30s").get("10s"); - assertThat(health.getStatus(), equalTo(ClusterHealthStatus.GREEN)); - assertThat(health.isTimedOut(), equalTo(false)); + logger.info("--> got cluster health on [{}]", node); + assertFalse("timed out on " + node, health.isTimedOut()); + assertThat("health status on " + node, health.getStatus(), equalTo(ClusterHealthStatus.GREEN)); } } @@ -254,4 +258,40 @@ public void run() { clusterHealthThread.join(); } + public void testWaitForEventsRetriesIfOtherConditionsNotMet() throws Exception { + final ActionFuture healthResponseFuture + = client().admin().cluster().prepareHealth("index").setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute(); + + final AtomicBoolean keepSubmittingTasks = new AtomicBoolean(true); + final ClusterService clusterService = internalCluster().getInstance(ClusterService.class, internalCluster().getMasterName()); + clusterService.submitStateUpdateTask("looping task", new ClusterStateUpdateTask(Priority.LOW) { + @Override + public ClusterState execute(ClusterState currentState) { + return currentState; + } + + @Override + public void onFailure(String source, Exception e) { + throw new AssertionError(source, e); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + if (keepSubmittingTasks.get()) { + clusterService.submitStateUpdateTask("looping task", this); + } + } + }); + + createIndex("index"); + assertFalse(client().admin().cluster().prepareHealth("index").setWaitForGreenStatus().get().isTimedOut()); + + // at this point the original health response should not have returned: there was never a point where the index was green AND + // the master had processed all pending tasks above LANGUID priority. 
+ assertFalse(healthResponseFuture.isDone()); + + keepSubmittingTasks.set(false); + assertFalse(healthResponseFuture.get().isTimedOut()); + } + } diff --git a/server/src/test/java/org/elasticsearch/cluster/FeatureAwareTests.java b/server/src/test/java/org/elasticsearch/cluster/FeatureAwareTests.java index 50bddc8677b62..9da25a4cc6781 100644 --- a/server/src/test/java/org/elasticsearch/cluster/FeatureAwareTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/FeatureAwareTests.java @@ -30,7 +30,6 @@ import java.io.IOException; import java.util.Arrays; -import java.util.Collections; import java.util.EnumSet; import java.util.Optional; @@ -111,9 +110,7 @@ public void testVersion() { final BytesStreamOutput out = new BytesStreamOutput(); final Version afterVersion = randomVersionBetween(random(), version, Version.CURRENT); out.setVersion(afterVersion); - if (custom.getRequiredFeature().isPresent()) { - out.setFeatures(Collections.singleton(custom.getRequiredFeature().get())); - } + custom.getRequiredFeature(); assertTrue(FeatureAware.shouldSerialize(out, custom)); } { @@ -121,9 +118,6 @@ public void testVersion() { final Version beforeVersion = randomVersionBetween(random(), VersionUtils.getFirstVersion(), VersionUtils.getPreviousVersion(version)); out.setVersion(beforeVersion); - if (custom.getRequiredFeature().isPresent() && randomBoolean()) { - out.setFeatures(Collections.singleton(custom.getRequiredFeature().get())); - } assertFalse(FeatureAware.shouldSerialize(out, custom)); } } @@ -138,7 +132,6 @@ public void testFeature() { final BytesStreamOutput out = new BytesStreamOutput(); out.setVersion(afterVersion); assertTrue(custom.getRequiredFeature().isPresent()); - out.setFeatures(Collections.singleton(custom.getRequiredFeature().get())); assertTrue(FeatureAware.shouldSerialize(out, custom)); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java index 377cbef84a4c6..e84b8391a7e6c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java @@ -131,7 +131,7 @@ public void testConnectAndDisconnect() throws Exception { service.connectToNodes(nodes, () -> future.onResponse(null)); future.actionGet(); if (isDisrupting == false) { - assertConnected(nodes); + assertConnected(transportService, nodes); } service.disconnectFromNodesExcept(nodes); @@ -169,6 +169,11 @@ public void testPeriodicReconnection() { final DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue(builder().put(NODE_NAME_SETTING.getKey(), "node").build(), random()); + MockTransport transport = new MockTransport(deterministicTaskQueue.getThreadPool()); + TestTransportService transportService = new TestTransportService(transport, deterministicTaskQueue.getThreadPool()); + transportService.start(); + transportService.acceptIncomingRequests(); + final NodeConnectionsService service = new NodeConnectionsService(settings.build(), deterministicTaskQueue.getThreadPool(), transportService); service.start(); @@ -211,7 +216,7 @@ public String toString() { transport.randomConnectionExceptions = false; logger.info("renewing connections"); runTasksUntil(deterministicTaskQueue, maxDisconnectionTime + reconnectIntervalMillis); - assertConnectedExactlyToNodes(targetNodes); + assertConnectedExactlyToNodes(transportService, targetNodes); } public void 
testOnlyBlocksOnConnectionsToNewNodes() throws Exception { @@ -314,11 +319,15 @@ private void ensureConnections(NodeConnectionsService service) { } private void assertConnectedExactlyToNodes(DiscoveryNodes discoveryNodes) { - assertConnected(discoveryNodes); + assertConnectedExactlyToNodes(transportService, discoveryNodes); + } + + private void assertConnectedExactlyToNodes(TransportService transportService, DiscoveryNodes discoveryNodes) { + assertConnected(transportService, discoveryNodes); assertThat(transportService.getConnectionManager().size(), equalTo(discoveryNodes.getSize())); } - private void assertConnected(Iterable nodes) { + private void assertConnected(TransportService transportService, Iterable nodes) { for (DiscoveryNode node : nodes) { assertTrue("not connected to " + node, transportService.nodeConnected(node)); } @@ -328,8 +337,9 @@ private void assertConnected(Iterable nodes) { @Before public void setUp() throws Exception { super.setUp(); - this.threadPool = new TestThreadPool(getClass().getName()); - this.transport = new MockTransport(); + ThreadPool threadPool = new TestThreadPool(getClass().getName()); + this.threadPool = threadPool; + this.transport = new MockTransport(threadPool); nodeConnectionBlocks = newConcurrentMap(); transportService = new TestTransportService(transport, threadPool); transportService.start(); @@ -361,21 +371,35 @@ public void handshake(Transport.Connection connection, long timeout, Predicate listener) throws ConnectTransportException { final CheckedRunnable connectionBlock = nodeConnectionBlocks.get(node); if (connectionBlock != null) { - try { - connectionBlock.run(); - } catch (Exception e) { - throw new AssertionError(e); - } + getThreadPool().generic().execute(() -> { + try { + connectionBlock.run(); + super.connectToNode(node, listener); + } catch (Exception e) { + throw new AssertionError(e); + } + }); + } else { + super.connectToNode(node, listener); } - super.connectToNode(node); } } private final class MockTransport implements Transport { private ResponseHandlers responseHandlers = new ResponseHandlers(); private volatile boolean randomConnectionExceptions = false; + private final ThreadPool threadPool; + + MockTransport(ThreadPool threadPool) { + this.threadPool = threadPool; + } @Override public void registerRequestHandler(RequestHandlerRegistry reg) { diff --git a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java index 4dbe62cf5cebb..54f080aeef2b8 100644 --- a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java @@ -32,7 +32,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; -import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRouting; @@ -50,14 +49,12 @@ import org.elasticsearch.index.shard.ShardId; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Set; import java.util.stream.Collectors; import java.util.stream.IntStream; -import static 
org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.contains; @@ -165,14 +162,14 @@ public void testIllegalShardFailureRequests() throws Exception { public void testMarkAsStaleWhenFailingShard() throws Exception { final MockAllocationService allocation = createAllocationService(); ClusterState clusterState = createClusterStateWithStartedShards("test markAsStale"); - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); IndexShardRoutingTable shardRoutingTable = clusterState.routingTable().index(INDEX).shard(0); long primaryTerm = clusterState.metaData().index(INDEX).primaryTerm(0); final Set oldInSync = clusterState.metaData().index(INDEX).inSyncAllocationIds(0); { ShardStateAction.FailedShardEntry failShardOnly = new ShardStateAction.FailedShardEntry(shardRoutingTable.shardId(), randomFrom(oldInSync), primaryTerm, "dummy", null, false); - ClusterState appliedState = executor.execute(clusterState, Arrays.asList(failShardOnly)).resultingState; + ClusterState appliedState = executor.execute(clusterState, Collections.singletonList(failShardOnly)).resultingState; Set newInSync = appliedState.metaData().index(INDEX).inSyncAllocationIds(0); assertThat(newInSync, equalTo(oldInSync)); } @@ -180,7 +177,7 @@ public void testMarkAsStaleWhenFailingShard() throws Exception { final String failedAllocationId = randomFrom(oldInSync); ShardStateAction.FailedShardEntry failAndMarkAsStale = new ShardStateAction.FailedShardEntry(shardRoutingTable.shardId(), failedAllocationId, primaryTerm, "dummy", null, true); - ClusterState appliedState = executor.execute(clusterState, Arrays.asList(failAndMarkAsStale)).resultingState; + ClusterState appliedState = executor.execute(clusterState, Collections.singletonList(failAndMarkAsStale)).resultingState; Set newInSync = appliedState.metaData().index(INDEX).inSyncAllocationIds(0); assertThat(Sets.difference(oldInSync, newInSync), contains(failedAllocationId)); } @@ -192,11 +189,9 @@ private ClusterState createClusterStateWithStartedShards(String reason) { IntStream.rangeClosed(1, numberOfNodes).mapToObj(node -> newNode("node" + node)).forEach(nodes::add); ClusterState stateAfterAddingNode = ClusterState.builder(clusterState).nodes(nodes).build(); - RoutingTable afterReroute = - allocationService.reroute(stateAfterAddingNode, reason).routingTable(); + RoutingTable afterReroute = allocationService.reroute(stateAfterAddingNode, reason).routingTable(); ClusterState stateAfterReroute = ClusterState.builder(stateAfterAddingNode).routingTable(afterReroute).build(); - RoutingNodes routingNodes = stateAfterReroute.getRoutingNodes(); - return allocationService.applyStartedShards(stateAfterReroute, routingNodes.shardsWithState(ShardRoutingState.INITIALIZING)); + return ESAllocationTestCase.startInitializingShardsAndReroute(allocationService, stateAfterReroute); } private List createExistingShards(ClusterState currentState, String reason) { diff --git a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStartedClusterStateTaskExecutorTests.java b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStartedClusterStateTaskExecutorTests.java index 20b7548004f4a..51ee06b0f3e0e 100644 --- 
a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStartedClusterStateTaskExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStartedClusterStateTaskExecutorTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.action.shard; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -29,6 +30,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.ShardId; @@ -53,13 +55,18 @@ public class ShardStartedClusterStateTaskExecutorTests extends ESAllocationTestC private ShardStateAction.ShardStartedClusterStateTaskExecutor executor; + private static void neverReroutes(String reason, Priority priority, ActionListener listener) { + fail("unexpectedly ran a deferred reroute"); + } + @Override public void setUp() throws Exception { super.setUp(); AllocationService allocationService = createAllocationService(Settings.builder() .put(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey(), Integer.MAX_VALUE) .build()); - executor = new ShardStateAction.ShardStartedClusterStateTaskExecutor(allocationService, logger); + executor = new ShardStateAction.ShardStartedClusterStateTaskExecutor(allocationService, + ShardStartedClusterStateTaskExecutorTests::neverReroutes, logger); } public void testEmptyTaskListProducesSameClusterState() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinationStateTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinationStateTests.java index 99f04015867f5..944b1157978de 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinationStateTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinationStateTests.java @@ -41,6 +41,8 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -734,6 +736,11 @@ public void testHandleCommitWithBadVersion() { public void testVoteCollection() { final CoordinationState.VoteCollection voteCollection = new CoordinationState.VoteCollection(); assertTrue(voteCollection.isEmpty()); + + assertFalse(voteCollection.addVote( + new DiscoveryNode("master-ineligible", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT))); + assertTrue(voteCollection.isEmpty()); + voteCollection.addVote(node1); assertFalse(voteCollection.isEmpty()); assertTrue(voteCollection.containsVoteFor(node1)); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java index 26d32ce91f908..885e33ddc8a00 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java @@ -41,6 +41,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.discovery.DiscoveryModule; import 
org.elasticsearch.gateway.GatewayService; +import org.elasticsearch.node.Node; import org.elasticsearch.test.MockLogAppender; import java.io.IOException; @@ -1206,6 +1207,61 @@ public void assertMatched() { } } + public void testReconfiguresToExcludeMasterIneligibleNodesInVotingConfig() { + final Cluster cluster = new Cluster(3); + cluster.runRandomly(); + cluster.stabilise(); + + final ClusterNode chosenNode = cluster.getAnyNode(); + + assertThat(cluster.getAnyLeader().getLastAppliedClusterState().getLastCommittedConfiguration().getNodeIds(), + hasItem(chosenNode.getId())); + assertThat(cluster.getAnyLeader().getLastAppliedClusterState().getLastAcceptedConfiguration().getNodeIds(), + hasItem(chosenNode.getId())); + + final boolean chosenNodeIsLeader = chosenNode == cluster.getAnyLeader(); + final long termBeforeRestart = cluster.getAnyNode().coordinator.getCurrentTerm(); + logger.info("--> restarting [{}] as a master-ineligible node", chosenNode); + + chosenNode.close(); + cluster.clusterNodes.replaceAll(cn -> cn == chosenNode ? cn.restartedNode(Function.identity(), Function.identity(), + Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false).build()) : cn); + cluster.stabilise(); + + if (chosenNodeIsLeader == false) { + assertThat("term did not change", cluster.getAnyNode().coordinator.getCurrentTerm(), is(termBeforeRestart)); + } + + assertThat(cluster.getAnyLeader().getLastAppliedClusterState().getLastCommittedConfiguration().getNodeIds(), + not(hasItem(chosenNode.getId()))); + assertThat(cluster.getAnyLeader().getLastAppliedClusterState().getLastAcceptedConfiguration().getNodeIds(), + not(hasItem(chosenNode.getId()))); + } + + public void testDoesNotPerformElectionWhenRestartingFollower() { + final Cluster cluster = new Cluster(randomIntBetween(2, 5), false, Settings.EMPTY); + cluster.runRandomly(); + cluster.stabilise(); + + final ClusterNode leader = cluster.getAnyLeader(); + final long expectedTerm = leader.coordinator.getCurrentTerm(); + + if (cluster.clusterNodes.stream().filter(n -> n.getLocalNode().isMasterNode()).count() == 2) { + // in the 2-node case, auto-shrinking the voting configuration is required to reduce the voting configuration down to just the + // leader, otherwise restarting the other master-eligible node triggers an election + leader.submitSetAutoShrinkVotingConfiguration(true); + cluster.stabilise(2 * DEFAULT_CLUSTER_STATE_UPDATE_DELAY); // first delay for the setting update, second for the reconfiguration + } + + for (final ClusterNode clusterNode : cluster.getAllNodesExcept(leader)) { + logger.info("--> restarting {}", clusterNode); + clusterNode.close(); + cluster.clusterNodes.replaceAll(cn -> + cn == clusterNode ? 
cn.restartedNode(Function.identity(), Function.identity(), Settings.EMPTY) : cn); + cluster.stabilise(); + assertThat("term should not change", cluster.getAnyNode().coordinator.getCurrentTerm(), is(expectedTerm)); + } + } } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java index 70567da1aadfb..ef7567ea5df91 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java @@ -57,7 +57,7 @@ public void testJoinDeduplication() { x -> localNode, null, Collections.emptySet()); JoinHelper joinHelper = new JoinHelper(Settings.EMPTY, null, null, transportService, () -> 0L, () -> null, (joinRequest, joinCallback) -> { throw new AssertionError(); }, startJoinRequest -> { throw new AssertionError(); }, - Collections.emptyList(), (s, r) -> {}); + Collections.emptyList(), (s, p, r) -> {}); transportService.start(); DiscoveryNode node1 = new DiscoveryNode("node1", buildNewFakeTransportAddress(), Version.CURRENT); @@ -153,7 +153,7 @@ public void testJoinValidationRejectsMismatchedClusterUUID() { x -> localNode, null, Collections.emptySet()); new JoinHelper(Settings.EMPTY, null, null, transportService, () -> 0L, () -> localClusterState, (joinRequest, joinCallback) -> { throw new AssertionError(); }, startJoinRequest -> { throw new AssertionError(); }, - Collections.emptyList(), (s, r) -> {}); // registers request handler + Collections.emptyList(), (s, p, r) -> {}); // registers request handler transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinTests.java index 37145e991b6c1..8ac0c0455e5e9 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinTests.java @@ -174,7 +174,7 @@ transportService, writableRegistry(), () -> new InMemoryPersistedState(term, initialState), r -> emptyList(), new NoOpClusterApplier(), Collections.emptyList(), - random, (s, r) -> {}, ElectionStrategy.DEFAULT_INSTANCE); + random, (s, p, r) -> {}, ElectionStrategy.DEFAULT_INSTANCE); transportService.start(); transportService.acceptIncomingRequests(); transport = capturingTransport; @@ -472,12 +472,16 @@ public void testBecomeFollowerFailsPendingJoin() throws Exception { } public void testConcurrentJoining() { - List nodes = IntStream.rangeClosed(1, randomIntBetween(2, 5)) + List masterNodes = IntStream.rangeClosed(1, randomIntBetween(2, 5)) .mapToObj(nodeId -> newNode(nodeId, true)).collect(Collectors.toList()); + List otherNodes = IntStream.rangeClosed(masterNodes.size() + 1, masterNodes.size() + 1 + randomIntBetween(0, 5)) + .mapToObj(nodeId -> newNode(nodeId, false)).collect(Collectors.toList()); + List allNodes = Stream.concat(masterNodes.stream(), otherNodes.stream()).collect(Collectors.toList()); - DiscoveryNode localNode = nodes.get(0); + DiscoveryNode localNode = masterNodes.get(0); VotingConfiguration votingConfiguration = new VotingConfiguration(randomValueOtherThan(singletonList(localNode), - () -> randomSubsetOf(randomIntBetween(1, nodes.size()), nodes)).stream().map(DiscoveryNode::getId).collect(Collectors.toSet())); + () -> randomSubsetOf(randomIntBetween(1, masterNodes.size()), 
masterNodes)).stream() + .map(DiscoveryNode::getId).collect(Collectors.toSet())); logger.info("Voting configuration: {}", votingConfiguration); @@ -489,7 +493,7 @@ public void testConcurrentJoining() { // we need at least a quorum of voting nodes with a correct term and worse state List successfulNodes; do { - successfulNodes = randomSubsetOf(nodes); + successfulNodes = randomSubsetOf(allNodes); } while (votingConfiguration.hasQuorum(successfulNodes.stream().map(DiscoveryNode::getId).collect(Collectors.toList())) == false); @@ -499,7 +503,7 @@ public void testConcurrentJoining() { node -> new JoinRequest(node, Optional.of(new Join(node, localNode, newTerm, initialTerm, initialVersion)))) .collect(Collectors.toList()); - List possiblyUnsuccessfulNodes = new ArrayList<>(nodes); + List possiblyUnsuccessfulNodes = new ArrayList<>(allNodes); possiblyUnsuccessfulNodes.removeAll(successfulNodes); logger.info("Possibly unsuccessful voting nodes: {}", possiblyUnsuccessfulNodes); @@ -572,8 +576,8 @@ public void testConcurrentJoining() { assertTrue(MasterServiceTests.discoveryState(masterService).nodes().isLocalNodeElectedMaster()); for (DiscoveryNode successfulNode : successfulNodes) { - assertTrue(successfulNode.toString(), clusterStateHasNode(successfulNode)); - assertTrue(successfulNode.toString(), coordinator.hasJoinVoteFrom(successfulNode)); + assertTrue(successfulNode + " joined cluster", clusterStateHasNode(successfulNode)); + assertFalse(successfulNode + " voted for master", coordinator.missingJoinVoteFrom(successfulNode)); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java b/server/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java index a475009b5a285..1a6136f090ce4 100644 --- a/server/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.health.TransportClusterHealthAction; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.ClusterName; @@ -143,7 +144,7 @@ public void testClusterHealthWaitsForClusterStateApplication() throws Interrupte TransportClusterHealthAction action = new TransportClusterHealthAction(transportService, clusterService, threadPool, new ActionFilters(new HashSet<>()), indexNameExpressionResolver, new TestGatewayAllocator()); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new ClusterHealthRequest().waitForGreenStatus(), listener); + ActionTestUtils.execute(action, null, new ClusterHealthRequest().waitForGreenStatus(), listener); assertFalse(listener.isDone()); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java index 3eef8ff686eef..667909d644beb 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java @@ -24,13 +24,13 @@ import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.cluster.ClusterName; 
import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ESAllocationTestCase; import org.elasticsearch.cluster.EmptyClusterInfoService; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingTable; -import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; @@ -164,8 +164,7 @@ public void testValidateShrinkIndex() { RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); // now we start the shard - routingTable = service.applyStartedShards(clusterState, - routingTable.index("source").shardsWithState(ShardRoutingState.INITIALIZING)).routingTable(); + routingTable = ESAllocationTestCase.startInitializingShardsAndReroute(service, clusterState, "source").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); int targetShards; do { @@ -234,8 +233,7 @@ public void testValidateSplitIndex() { RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); // now we start the shard - routingTable = service.applyStartedShards(clusterState, - routingTable.index("source").shardsWithState(ShardRoutingState.INITIALIZING)).routingTable(); + routingTable = ESAllocationTestCase.startInitializingShardsAndReroute(service, clusterState, "source").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); MetaDataCreateIndexService.validateSplitIndex(clusterState, "source", Collections.emptySet(), "target", @@ -370,9 +368,8 @@ private void runPrepareResizeIndexSettingsTest( final ClusterState routingTableClusterState = ClusterState.builder(initialClusterState).routingTable(initialRoutingTable).build(); // now we start the shard - final RoutingTable routingTable = service.applyStartedShards( - routingTableClusterState, - initialRoutingTable.index(indexName).shardsWithState(ShardRoutingState.INITIALIZING)).routingTable(); + final RoutingTable routingTable + = ESAllocationTestCase.startInitializingShardsAndReroute(service, routingTableClusterState, indexName).routingTable(); final ClusterState clusterState = ClusterState.builder(routingTableClusterState).routingTable(routingTable).build(); final Settings.Builder indexSettingsBuilder = Settings.builder().put("index.number_of_shards", 1).put(requestSettings); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java index 3724f47537428..47fb9e3e3bd17 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.Version; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.IndexScopedSettings; 
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.indices.mapper.MapperRegistry; @@ -106,9 +107,9 @@ public void testFailUpgrade() { .build()); String message = expectThrows(IllegalStateException.class, () -> service.upgradeIndexMetaData(metaData, Version.CURRENT.minimumIndexCompatibilityVersion())).getMessage(); - assertEquals(message, "The index [[foo/BOOM]] was created with version [" + indexCreated + "] " + + assertThat(message, equalTo("The index [foo/" + metaData.getIndexUUID() + "] was created with version [" + indexCreated + "] " + "but the minimum compatible version is [" + minCompat + "]." + - " It should be re-indexed in Elasticsearch " + minCompat.major + ".x before upgrading to " + Version.CURRENT.toString() + "."); + " It should be re-indexed in Elasticsearch " + minCompat.major + ".x before upgrading to " + Version.CURRENT.toString() + ".")); indexCreated = VersionUtils.randomVersionBetween(random(), minCompat, Version.CURRENT); indexUpgraded = VersionUtils.randomVersionBetween(random(), indexCreated, Version.CURRENT); @@ -160,15 +161,25 @@ private MetaDataIndexUpgradeService getMetaDataIndexUpgradeService() { } public static IndexMetaData newIndexMeta(String name, Settings indexSettings) { - Settings build = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_CREATION_DATE, 1) - .put(IndexMetaData.SETTING_INDEX_UUID, "BOOM") - .put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.CURRENT.minimumIndexCompatibilityVersion()) + final Settings settings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, randomEarlierCompatibleVersion()) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, between(0, 5)) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, between(1, 5)) + .put(IndexMetaData.SETTING_CREATION_DATE, randomNonNegativeLong()) + .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random())) + .put(IndexMetaData.SETTING_VERSION_UPGRADED, randomEarlierCompatibleVersion()) .put(indexSettings) .build(); - return IndexMetaData.builder(name).settings(build).build(); + final IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(name).settings(settings); + if (randomBoolean()) { + indexMetaDataBuilder.state(IndexMetaData.State.CLOSE); + } + return indexMetaDataBuilder.build(); + } + + private static Version randomEarlierCompatibleVersion() { + return randomValueOtherThan(Version.CURRENT, () -> VersionUtils.randomVersionBetween(random(), + Version.CURRENT.minimumIndexCompatibilityVersion(), Version.CURRENT)); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdIT.java b/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdIT.java index d92624f539c9e..32e93a09eb6d1 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdIT.java @@ -52,7 +52,6 @@ import java.util.Arrays; import java.util.Collection; import java.util.Set; -import java.util.concurrent.ExecutionException; import java.util.stream.Collectors; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; @@ -174,7 +173,7 @@ public void checkHealthStatus(String indexName, ClusterHealthStatus healthStatus assertThat(indexHealthStatus, is(healthStatus)); } - private int indexDocs(String indexName, Object ... 
source) throws InterruptedException, ExecutionException { + private int indexDocs(String indexName, Object ... source) throws InterruptedException { // index some docs in several segments int numDocs = 0; for (int k = 0, attempts = randomIntBetween(5, 10); k < attempts; k++) { @@ -192,9 +191,7 @@ private int indexDocs(String indexName, Object ... source) throws InterruptedExc } private Path getIndexPath(String nodeName, ShardId shardId) { - final Set indexDirs = RemoveCorruptedShardDataCommandIT.getDirs(nodeName, shardId, ShardPath.INDEX_FOLDER_NAME); - assertThat(indexDirs, hasSize(1)); - return indexDirs.iterator().next(); + return RemoveCorruptedShardDataCommandIT.getPathToShardData(nodeName, shardId, ShardPath.INDEX_FOLDER_NAME); } private Set getAllocationIds(String indexName) { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/BatchedRerouteServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/BatchedRerouteServiceTests.java index 966ac1e60650d..c29174d055d05 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/BatchedRerouteServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/BatchedRerouteServiceTests.java @@ -25,6 +25,8 @@ import org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Priority; +import org.elasticsearch.common.Randomness; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; @@ -32,12 +34,16 @@ import org.junit.After; import org.junit.Before; +import java.util.ArrayList; +import java.util.EnumSet; +import java.util.List; import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Function; import static org.hamcrest.Matchers.lessThan; @@ -70,13 +76,14 @@ public void testReroutesWhenRequested() throws InterruptedException { final CountDownLatch countDownLatch = new CountDownLatch(iterations); for (int i = 0; i < iterations; i++) { rerouteCountBeforeReroute = Math.max(rerouteCountBeforeReroute, rerouteCount.get()); - batchedRerouteService.reroute("iteration " + i, ActionListener.wrap(countDownLatch::countDown)); + batchedRerouteService.reroute("iteration " + i, randomFrom(EnumSet.allOf(Priority.class)), + ActionListener.wrap(countDownLatch::countDown)); } countDownLatch.await(10, TimeUnit.SECONDS); assertThat(rerouteCountBeforeReroute, lessThan(rerouteCount.get())); } - public void testBatchesReroutesTogether() throws BrokenBarrierException, InterruptedException { + public void testBatchesReroutesTogetherAtPriorityOfHighestSubmittedReroute() throws BrokenBarrierException, InterruptedException { final CyclicBarrier cyclicBarrier = new CyclicBarrier(2); clusterService.submitStateUpdateTask("block master service", new ClusterStateUpdateTask() { @Override @@ -100,14 +107,77 @@ public void onFailure(String source, Exception e) { return s; }); - final int iterations = between(1, 100); - final CountDownLatch countDownLatch = new CountDownLatch(iterations); - for (int i = 0; i < iterations; i++) { - batchedRerouteService.reroute("iteration " + i, ActionListener.wrap(countDownLatch::countDown)); + final int 
iterations = scaledRandomIntBetween(1, 100); + final CountDownLatch tasksSubmittedCountDown = new CountDownLatch(iterations); + final CountDownLatch tasksCompletedCountDown = new CountDownLatch(iterations); + final List actions = new ArrayList<>(iterations); + final Function rerouteFromPriority = priority -> () -> { + final AtomicBoolean alreadyRun = new AtomicBoolean(); + batchedRerouteService.reroute("reroute at " + priority, priority, ActionListener.wrap(() -> { + assertTrue(alreadyRun.compareAndSet(false, true)); + tasksCompletedCountDown.countDown(); + })); + tasksSubmittedCountDown.countDown(); + }; + actions.add(rerouteFromPriority.apply(Priority.URGENT)); // ensure at least one URGENT priority reroute + for (int i = 1; i < iterations; i++) { + final int iteration = i; + if (randomBoolean()) { + actions.add(rerouteFromPriority.apply(randomFrom(Priority.LOW, Priority.NORMAL, Priority.HIGH, Priority.URGENT))); + } else { + final Priority priority = randomFrom(Priority.NORMAL, Priority.HIGH, Priority.URGENT, Priority.IMMEDIATE); + final boolean submittedConcurrentlyWithReroute = randomBoolean(); + if (submittedConcurrentlyWithReroute == false) { + tasksSubmittedCountDown.countDown(); // this task might be submitted later + } + actions.add(() -> { + clusterService.submitStateUpdateTask("other task " + iteration + " at " + priority, + new ClusterStateUpdateTask(priority) { + + @Override + public ClusterState execute(ClusterState currentState) { + switch (priority) { + case IMMEDIATE: + if (submittedConcurrentlyWithReroute) { + assertFalse("should have rerouted after " + priority + " priority task", rerouteExecuted.get()); + } // else this task might be submitted too late to precede the reroute + break; + case URGENT: + // may run either before or after reroute + break; + case HIGH: + case NORMAL: + assertTrue("should have rerouted before " + priority + " priority task", rerouteExecuted.get()); + break; + default: + fail("unexpected priority: " + priority); + break; + } + return currentState; + } + + @Override + public void onFailure(String source, Exception e) { + throw new AssertionError(source, e); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + tasksCompletedCountDown.countDown(); + } + }); + if (submittedConcurrentlyWithReroute) { + tasksSubmittedCountDown.countDown(); + } + }); + } } + Randomness.shuffle(actions); + actions.forEach(threadPool.generic()::execute); + assertTrue(tasksSubmittedCountDown.await(10, TimeUnit.SECONDS)); cyclicBarrier.await(); // allow master thread to continue; - countDownLatch.await(); // wait for reroute to complete + assertTrue(tasksCompletedCountDown.await(10, TimeUnit.SECONDS)); // wait for reroute to complete assertTrue(rerouteExecuted.get()); // see above for assertion that it's only called once } @@ -123,7 +193,19 @@ public void testNotifiesOnFailure() throws InterruptedException { final int iterations = between(1, 100); final CountDownLatch countDownLatch = new CountDownLatch(iterations); for (int i = 0; i < iterations; i++) { - batchedRerouteService.reroute("iteration " + i, ActionListener.wrap(countDownLatch::countDown)); + batchedRerouteService.reroute("iteration " + i, + randomFrom(EnumSet.allOf(Priority.class)), ActionListener.wrap( + r -> { + countDownLatch.countDown(); + if (rarely()) { + throw new ElasticsearchException("failure during notification"); + } + }, e -> { + countDownLatch.countDown(); + if (randomBoolean()) { + throw new ElasticsearchException("failure during 
failure notification", e); + } + })); if (rarely()) { clusterService.getMasterService().setClusterStatePublisher( randomBoolean() diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java index 098803e3cfd53..4ebd5eb59ad45 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java @@ -45,7 +45,6 @@ import static java.util.Collections.singleton; import static org.elasticsearch.cluster.routing.DelayedAllocationService.CLUSTER_UPDATE_TASK_SOURCE; -import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; @@ -92,9 +91,9 @@ public void testNoDelayedUnassigned() throws Exception { .build(); clusterState = allocationService.reroute(clusterState, "reroute"); // starting primaries - clusterState = allocationService.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocationService, clusterState); // starting replicas - clusterState = allocationService.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocationService, clusterState); assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(false)); ClusterState prevState = clusterState; // remove node2 and reroute @@ -136,9 +135,9 @@ public void testDelayedUnassignedScheduleReroute() throws Exception { allocationService.setNanoTimeOverride(baseTimestampNanos); clusterState = allocationService.reroute(clusterState, "reroute"); // starting primaries - clusterState = allocationService.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocationService, clusterState); // starting replicas - clusterState = allocationService.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocationService, clusterState); assertFalse("no shards should be unassigned", clusterState.getRoutingNodes().unassigned().size() > 0); String nodeId = null; final List allShards = clusterState.getRoutingTable().allShards("test"); @@ -228,9 +227,9 @@ public void testDelayedUnassignedScheduleRerouteAfterDelayedReroute() throws Exc // allocate shards clusterState = allocationService.reroute(clusterState, "reroute"); // start primaries - clusterState = allocationService.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocationService, clusterState); // start replicas - clusterState = allocationService.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocationService, clusterState); assertThat("all shards should be started", clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(4)); // find replica of short_delay @@ -385,9 +384,9 @@ public void 
testDelayedUnassignedScheduleRerouteRescheduledOnShorterDelay() thro allocationService.setNanoTimeOverride(nodeLeftTimestampNanos); clusterState = allocationService.reroute(clusterState, "reroute"); // starting primaries - clusterState = allocationService.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocationService, clusterState); // starting replicas - clusterState = allocationService.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocationService, clusterState); assertFalse("no shards should be unassigned", clusterState.getRoutingNodes().unassigned().size() > 0); String nodeIdOfFooReplica = null; for (ShardRouting shardRouting : clusterState.getRoutingTable().allShards("foo")) { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryTermsTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryTermsTests.java index 8a9b00a8d4ff7..34dedc823fc67 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryTermsTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryTermsTests.java @@ -110,10 +110,10 @@ private void incrementPrimaryTerm(String index, int shard) { } private boolean startInitializingShards(String index) { - final List startedShards = this.clusterState.getRoutingNodes().shardsWithState(index, INITIALIZING); + final List startedShards = clusterState.getRoutingNodes().shardsWithState(index, INITIALIZING); logger.info("start primary shards for index [{}]: {} ", index, startedShards); - ClusterState rerouteResult = allocationService.applyStartedShards(this.clusterState, startedShards); - boolean changed = rerouteResult.equals(this.clusterState) == false; + ClusterState rerouteResult = startShardsAndReroute(allocationService, clusterState, startedShards); + boolean changed = rerouteResult.equals(clusterState) == false; applyRerouteResult(rerouteResult); return changed; } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java index 851fe9c550270..c573206d848bf 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java @@ -38,7 +38,6 @@ import java.util.Set; import java.util.stream.Collectors; -import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -104,8 +103,7 @@ private void initPrimaries() { private void startInitializingShards(String index) { logger.info("start primary shards for index {}", index); - this.clusterState = ALLOCATION_SERVICE.applyStartedShards(this.clusterState, - this.clusterState.getRoutingNodes().shardsWithState(index, INITIALIZING)); + clusterState = startInitializingShardsAndReroute(ALLOCATION_SERVICE, clusterState, index); } private IndexMetaData.Builder createIndexMetaData(String indexName) { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java index 53bdddcd71aeb..20216af9bf38e 100644 --- 
a/server/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java @@ -45,7 +45,6 @@ import java.nio.ByteBuffer; import java.util.Collections; -import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED; import static org.hamcrest.Matchers.equalTo; @@ -193,7 +192,7 @@ public void testReplicaAdded() { clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); clusterState = allocation.reroute(clusterState, "reroute"); // starting primaries - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); IndexRoutingTable.Builder builder = IndexRoutingTable.builder(index); for (IndexShardRoutingTable indexShardRoutingTable : clusterState.routingTable().index(index)) { builder.addIndexShard(indexShardRoutingTable); @@ -237,9 +236,9 @@ public void testNodeLeave() { .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); clusterState = allocation.reroute(clusterState, "reroute"); // starting primaries - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); // starting replicas - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(false)); // remove node2 and reroute clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node2")).build(); @@ -269,9 +268,9 @@ public void testFailedShard() { .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); clusterState = allocation.reroute(clusterState, "reroute"); // starting primaries - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); // starting replicas - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(false)); // fail shard ShardRouting shardToFail = clusterState.getRoutingNodes().shardsWithState(STARTED).get(0); @@ -327,9 +326,9 @@ public void testNumberOfDelayedUnassigned() throws Exception { clusterState = allocation.reroute(clusterState, "reroute"); assertThat(UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), equalTo(0)); // starting primaries - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); // starting replicas - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = 
startInitializingShardsAndReroute(allocation, clusterState); assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(false)); // remove node2 and reroute clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node2")).build(); @@ -358,9 +357,9 @@ public void testFindNextDelayedAllocation() { clusterState = allocation.reroute(clusterState, "reroute"); assertThat(UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), equalTo(0)); // starting primaries - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); // starting replicas - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(false)); // remove node2 and reroute final long baseTime = System.nanoTime(); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java index d2ef2de5458cb..31f0bfa01260c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java @@ -123,7 +123,7 @@ public void testMinimalRelocations() { assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(INITIALIZING).size(), equalTo(0)); assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(INITIALIZING).size(), equalTo(0)); - ClusterState newState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + ClusterState newState = startInitializingShardsAndReroute(service, clusterState); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; routingNodes = clusterState.getRoutingNodes(); @@ -132,7 +132,7 @@ public void testMinimalRelocations() { assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(INITIALIZING).size(), equalTo(0)); assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(INITIALIZING).size(), equalTo(0)); - newState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + newState = startInitializingShardsAndReroute(service, clusterState); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; routingNodes = clusterState.getRoutingNodes(); @@ -141,7 +141,7 @@ public void testMinimalRelocations() { assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(INITIALIZING).size(), equalTo(0)); assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(INITIALIZING).size(), equalTo(0)); - newState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + newState = startInitializingShardsAndReroute(service, clusterState); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; routingNodes = clusterState.getRoutingNodes(); @@ -150,7 +150,7 @@ public void testMinimalRelocations() { assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(INITIALIZING).size(), equalTo(0)); assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(INITIALIZING).size(), equalTo(0)); - newState 
= service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + newState = startInitializingShardsAndReroute(service, clusterState); assertThat(newState, equalTo(clusterState)); assertNumIndexShardsPerNode(clusterState, equalTo(2)); logger.debug("ClusterState: {}", clusterState.getRoutingNodes()); @@ -186,7 +186,7 @@ public void testMinimalRelocationsNoLimit() { assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(INITIALIZING).size(), equalTo(0)); assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(INITIALIZING).size(), equalTo(0)); - ClusterState newState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + ClusterState newState = startInitializingShardsAndReroute(service, clusterState); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; routingNodes = clusterState.getRoutingNodes(); @@ -195,7 +195,7 @@ public void testMinimalRelocationsNoLimit() { assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(INITIALIZING).size(), equalTo(0)); assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(INITIALIZING).size(), equalTo(0)); - newState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + newState = startInitializingShardsAndReroute(service, clusterState); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; routingNodes = clusterState.getRoutingNodes(); @@ -204,7 +204,7 @@ public void testMinimalRelocationsNoLimit() { assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(INITIALIZING).size(), equalTo(0)); assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(INITIALIZING).size(), equalTo(0)); - newState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + newState = startInitializingShardsAndReroute(service, clusterState); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; routingNodes = clusterState.getRoutingNodes(); @@ -213,7 +213,7 @@ public void testMinimalRelocationsNoLimit() { assertThat(clusterState.getRoutingNodes().node("node0").shardsWithState(INITIALIZING).size(), equalTo(0)); assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(INITIALIZING).size(), equalTo(0)); - newState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + newState = startInitializingShardsAndReroute(service, clusterState); assertThat(newState, equalTo(clusterState)); assertNumIndexShardsPerNode(clusterState, equalTo(2)); logger.debug("ClusterState: {}", clusterState.getRoutingNodes()); @@ -287,13 +287,10 @@ private ClusterState initCluster(AllocationService service, int numberOfNodes, i clusterState = service.reroute(clusterState, "reroute"); logger.info("restart all the primary shards, replicas will start initializing"); - RoutingNodes routingNodes = clusterState.getRoutingNodes(); - clusterState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(service, clusterState); logger.info("start the replica shards"); - routingNodes = clusterState.getRoutingNodes(); - clusterState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); - routingNodes = clusterState.getRoutingNodes(); + clusterState = startInitializingShardsAndReroute(service, clusterState); logger.info("complete rebalancing"); return 
applyStartedShardsUntilNoChange(clusterState, service); @@ -316,12 +313,10 @@ private ClusterState addIndex(ClusterState clusterState, AllocationService servi clusterState = service.reroute(clusterState, "reroute"); logger.info("restart all the primary shards, replicas will start initializing"); - RoutingNodes routingNodes = clusterState.getRoutingNodes(); - clusterState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(service, clusterState); logger.info("start the replica shards"); - routingNodes = clusterState.getRoutingNodes(); - clusterState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(service, clusterState); logger.info("complete rebalancing"); return applyStartedShardsUntilNoChange(clusterState, service); @@ -344,12 +339,10 @@ private ClusterState removeNodes(ClusterState clusterState, AllocationService se clusterState = service.disassociateDeadNodes(clusterState, true, "reroute"); logger.info("start all the primary shards, replicas will start initializing"); - RoutingNodes routingNodes = clusterState.getRoutingNodes(); - clusterState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(service, clusterState); logger.info("start the replica shards"); - routingNodes = clusterState.getRoutingNodes(); - clusterState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(service, clusterState); logger.info("rebalancing"); clusterState = service.reroute(clusterState, "reroute"); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java index f3dbdd0eb2552..646ac1e0efba3 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java @@ -100,7 +100,7 @@ public void testMoveShardCommand() { clusterState = allocation.reroute(clusterState, "reroute"); logger.info("start primary shard"); - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); logger.info("move the shard"); String existingNodeId = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(); @@ -119,7 +119,7 @@ public void testMoveShardCommand() { assertThat(clusterState.getRoutingNodes().node(toNodeId).iterator().next().state(), equalTo(ShardRoutingState.INITIALIZING)); logger.info("finish moving the shard"); - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); assertThat(clusterState.getRoutingNodes().node(existingNodeId).isEmpty(), equalTo(true)); assertThat(clusterState.getRoutingNodes().node(toNodeId).iterator().next().state(), equalTo(ShardRoutingState.STARTED)); @@ -228,7 +228,7 @@ public void testAllocateCommand() { assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0)); logger.info("--> start the primary shard"); - clusterState = 
allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0)); @@ -253,7 +253,7 @@ public void testAllocateCommand() { logger.info("--> start the replica shard"); - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1)); @@ -308,7 +308,7 @@ public void testAllocateStalePrimaryCommand() { Set inSyncAllocationIds = clusterState.metaData().index(index).inSyncAllocationIds(0); assertThat(inSyncAllocationIds, equalTo(Collections.singleton(RecoverySource.ExistingStoreRecoverySource.FORCED_ALLOCATION_ID))); - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); routingNode1 = clusterState.getRoutingNodes().node(node1); assertThat(routingNode1.size(), equalTo(1)); assertThat(routingNode1.shardsWithState(STARTED).size(), equalTo(1)); @@ -359,7 +359,7 @@ public void testCancelCommand() { } logger.info("--> start the primary shard"); - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0)); @@ -409,7 +409,7 @@ public void testCancelCommand() { } logger.info("--> start the replica shard"); - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1)); @@ -435,7 +435,7 @@ public void testCancelCommand() { assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(INITIALIZING).size(), equalTo(1)); logger.info("--> start the replica shard"); - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1)); @@ -490,8 +490,8 @@ public void 
testCancelCommand() { assertThat(clusterState.getRoutingNodes().node("node3").shardsWithState(INITIALIZING).get(0).relocatingNodeId(), nullValue()); logger.info("--> start the former target replica shard"); - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); - assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); + assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0)); assertThat(clusterState.getRoutingNodes().node("node3").shardsWithState(STARTED).size(), equalTo(1)); @@ -628,7 +628,7 @@ public void testMoveShardToNonDataNode() { .add(node2)).build(); logger.info("start primary shard"); - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); Index index = clusterState.getMetaData().index("test").getIndex(); MoveAllocationCommand command = new MoveAllocationCommand(index.getName(), 0, "node1", "node2"); @@ -666,7 +666,7 @@ public void testMoveShardFromNonDataNode() { .add(node1) .add(node2)).build(); logger.info("start primary shard"); - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); Index index = clusterState.getMetaData().index("test").getIndex(); MoveAllocationCommand command = new MoveAllocationCommand(index.getName(), 0, "node2", "node1"); @@ -735,7 +735,7 @@ public void testConflictingCommandsInSingleRequest() { clusterState = allocation.reroute(clusterState, new AllocationCommands(new AllocateEmptyPrimaryAllocationCommand(index3, 0, node1, true)), false, false).getClusterState(); - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); final ClusterState updatedClusterState = clusterState; assertThat(updatedClusterState.getRoutingNodes().node(node1).shardsWithState(STARTED).size(), equalTo(1)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationPriorityTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationPriorityTests.java index fdb9fdb46a85c..02398e5b80208 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationPriorityTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationPriorityTests.java @@ -80,18 +80,18 @@ public void testPrioritizedIndicesAllocatedFirst() { assertEquals(highPriorityName, clusterState.getRoutingNodes().shardsWithState(INITIALIZING).get(0).getIndexName()); assertEquals(highPriorityName, clusterState.getRoutingNodes().shardsWithState(INITIALIZING).get(1).getIndexName()); - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); assertEquals(2, clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size()); assertEquals(lowPriorityName, 
clusterState.getRoutingNodes().shardsWithState(INITIALIZING).get(0).getIndexName()); assertEquals(lowPriorityName, clusterState.getRoutingNodes().shardsWithState(INITIALIZING).get(1).getIndexName()); - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); assertEquals(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).toString(),2, clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size()); assertEquals(highPriorityName, clusterState.getRoutingNodes().shardsWithState(INITIALIZING).get(0).getIndexName()); assertEquals(highPriorityName, clusterState.getRoutingNodes().shardsWithState(INITIALIZING).get(1).getIndexName()); - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); assertEquals(2, clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size()); assertEquals(lowPriorityName, clusterState.getRoutingNodes().shardsWithState(INITIALIZING).get(0).getIndexName()); assertEquals(lowPriorityName, clusterState.getRoutingNodes().shardsWithState(INITIALIZING).get(1).getIndexName()); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationServiceTests.java new file mode 100644 index 0000000000000..6f6b6bb39bd9a --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationServiceTests.java @@ -0,0 +1,72 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.cluster.routing.allocation; + +import org.elasticsearch.test.ESTestCase; + +import java.util.List; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.not; + +public class AllocationServiceTests extends ESTestCase { + + public void testFirstListElementsToCommaDelimitedStringReportsAllElementsIfShort() { + List<String> strings = IntStream.range(0, between(0, 10)).mapToObj(i -> randomAlphaOfLength(10)).collect(Collectors.toList()); + assertAllElementsReported(strings, randomBoolean()); + } + + public void testFirstListElementsToCommaDelimitedStringReportsAllElementsIfDebugEnabled() { + List<String> strings = IntStream.range(0, between(0, 100)).mapToObj(i -> randomAlphaOfLength(10)).collect(Collectors.toList()); + assertAllElementsReported(strings, true); + } + + private void assertAllElementsReported(List<String> strings, boolean isDebugEnabled) { + final String abbreviated = AllocationService.firstListElementsToCommaDelimitedString(strings, Function.identity(), isDebugEnabled); + for (String string : strings) { + assertThat(abbreviated, containsString(string)); + } + assertThat(abbreviated, not(containsString("..."))); + } + + public void testFirstListElementsToCommaDelimitedStringReportsFirstElementsIfLong() { + List<String> strings = IntStream.range(0, between(11, 100)).mapToObj(i -> randomAlphaOfLength(10)) + .distinct().collect(Collectors.toList()); + final String abbreviated = AllocationService.firstListElementsToCommaDelimitedString(strings, Function.identity(), false); + for (int i = 0; i < strings.size(); i++) { + if (i < 10) { + assertThat(abbreviated, containsString(strings.get(i))); + } else { + assertThat(abbreviated, not(containsString(strings.get(i)))); + } + } + assertThat(abbreviated, containsString("...")); + assertThat(abbreviated, containsString("[" + strings.size() + " items in total]")); + } + + public void testFirstListElementsToCommaDelimitedStringUsesFormatterNotToString() { + List<String> strings = IntStream.range(0, between(1, 100)).mapToObj(i -> "original").collect(Collectors.toList()); + final String abbreviated = AllocationService.firstListElementsToCommaDelimitedString(strings, s -> "formatted", randomBoolean()); + assertThat(abbreviated, containsString("formatted")); + assertThat(abbreviated, not(containsString("original"))); + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java index b0ce9ad320a9e..6174323ca69e8 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java @@ -80,10 +80,10 @@ public void testMoveShardOnceNewNodeWithAttributeAdded1() { assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); logger.info("--> start the shards (primaries)"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logger.info("--> start the shards (replicas)"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy,
clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2)); @@ -99,7 +99,7 @@ public void testMoveShardOnceNewNodeWithAttributeAdded1() { equalTo("node3")); logger.info("--> complete relocation"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2)); @@ -143,10 +143,10 @@ public void testMoveShardOnceNewNodeWithAttributeAdded2() { assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); logger.info("--> start the shards (primaries)"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logger.info("--> start the shards (replicas)"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2)); @@ -162,7 +162,7 @@ public void testMoveShardOnceNewNodeWithAttributeAdded2() { equalTo("node4")); logger.info("--> complete relocation"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2)); @@ -218,10 +218,10 @@ public void testMoveShardOnceNewNodeWithAttributeAdded3() { assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(5)); logger.info("--> start the shards (primaries)"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logger.info("--> start the shards (replicas)"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10)); @@ -238,10 +238,10 @@ public void testMoveShardOnceNewNodeWithAttributeAdded3() { equalTo("node3")); logger.info("--> complete initializing"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logger.info("--> run it again, since we still might have relocation"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10)); @@ -256,7 +256,7 @@ public void testMoveShardOnceNewNodeWithAttributeAdded3() { assertThat(clusterState.getRoutingNodes().shardsWithState(RELOCATING).size(), greaterThan(0)); logger.info("--> complete relocation"); - clusterState = strategy.applyStartedShards(clusterState, 
clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10)); @@ -297,10 +297,10 @@ public void testMoveShardOnceNewNodeWithAttributeAdded4() { assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(10)); logger.info("--> start the shards (primaries)"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logger.info("--> start the shards (replicas)"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(20)); @@ -319,9 +319,9 @@ public void testMoveShardOnceNewNodeWithAttributeAdded4() { logger.info("--> complete initializing"); for (int i = 0; i < 2; i++) { logger.info("--> complete initializing round: [{}]", i); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); - } - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + } + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(20)); assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(10)); @@ -341,8 +341,8 @@ public void testMoveShardOnceNewNodeWithAttributeAdded4() { logger.info("--> complete relocation"); for (int i = 0; i < 2; i++) { logger.info("--> complete initializing round: [{}]", i); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); - } + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + } assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(20)); assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(5)); assertThat(clusterState.getRoutingNodes().node("node4").size(), equalTo(5)); @@ -382,10 +382,10 @@ public void testMoveShardOnceNewNodeWithAttributeAdded5() { assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); logger.info("--> start the shards (primaries)"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logger.info("--> start the shards (replicas)"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2)); @@ -401,7 +401,7 @@ public void testMoveShardOnceNewNodeWithAttributeAdded5() { equalTo("node3")); logger.info("--> complete relocation"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = 
startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(3)); @@ -419,7 +419,7 @@ public void testMoveShardOnceNewNodeWithAttributeAdded5() { equalTo("node4")); logger.info("--> complete relocation"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(3)); @@ -458,10 +458,10 @@ public void testMoveShardOnceNewNodeWithAttributeAdded6() { assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); logger.info("--> start the shards (primaries)"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logger.info("--> start the shards (replicas)"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(4)); @@ -477,7 +477,7 @@ public void testMoveShardOnceNewNodeWithAttributeAdded6() { equalTo("node5")); logger.info("--> complete relocation"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(4)); @@ -495,7 +495,7 @@ public void testMoveShardOnceNewNodeWithAttributeAdded6() { equalTo("node6")); logger.info("--> complete relocation"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(4)); @@ -533,7 +533,7 @@ public void testFullAwareness1() { assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); logger.info("--> start the shards (primaries)"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logger.info("--> replica will not start because we have only one rack value"); assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(1)); @@ -551,7 +551,7 @@ public void testFullAwareness1() { equalTo("node3")); logger.info("--> complete relocation"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2)); @@ -598,7 +598,7 @@ public void testFullAwareness2() { assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); logger.info("--> start the shards (primaries)"); - clusterState = strategy.applyStartedShards(clusterState, 
clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logger.info("--> replica will not start because we have only one rack value"); assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(1)); @@ -616,7 +616,7 @@ public void testFullAwareness2() { equalTo("node4")); logger.info("--> complete relocation"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2)); @@ -669,7 +669,7 @@ public void testFullAwareness3() { assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(10)); logger.info("--> start the shards (primaries)"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10)); @@ -685,10 +685,10 @@ public void testFullAwareness3() { equalTo("node3")); logger.info("--> complete initializing"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logger.info("--> run it again, since we still might have relocation"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(20)); @@ -703,7 +703,7 @@ public void testFullAwareness3() { assertThat(clusterState.getRoutingNodes().shardsWithState(RELOCATING).size(), greaterThan(0)); logger.info("--> complete relocation"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(20)); @@ -744,11 +744,11 @@ public void testUnbalancedZones() { assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(5)); logger.info("--> start the shards (primaries)"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(5)); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(5)); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logger.info("--> all replicas are allocated and started since we have on node in each zone"); assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(10)); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(0)); @@ -764,7 +764,7 @@ public void testUnbalancedZones() { equalTo("A-1")); 
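The new AllocationServiceTests added earlier in this diff pin down the abbreviation behaviour of AllocationService.firstListElementsToCommaDelimitedString: every element is reported when the list has at most ten entries or when debug logging is enabled; otherwise only the first ten appear, together with "..." and an "[N items in total]" suffix, and elements are always rendered through the supplied formatter. A minimal sketch that satisfies those assertions follows; the exact delimiter and suffix punctuation are assumptions, and the production method lives in AllocationService itself, so its body may differ.

```java
import java.util.List;
import java.util.function.Function;
import java.util.stream.Collectors;

final class FirstElementsSketch {

    // Render at most the first ten elements unless debug logging is enabled,
    // keeping reroute-related log lines bounded on large clusters. The threshold
    // of ten is implied by the tests; the joining format is an assumption.
    static <T> String firstListElementsToCommaDelimitedString(
            List<T> elements, Function<T, String> formatter, boolean isDebugEnabled) {
        final int maxNumberOfElements = 10;
        if (isDebugEnabled || elements.size() <= maxNumberOfElements) {
            return elements.stream().map(formatter).collect(Collectors.joining(", "));
        }
        return elements.stream().limit(maxNumberOfElements).map(formatter).collect(Collectors.joining(", "))
                + ", ... [" + elements.size() + " items in total]";
    }
}
```

The formatter parameter is what testFirstListElementsToCommaDelimitedStringUsesFormatterNotToString guards: callers can log a short per-shard summary instead of relying on toString().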
logger.info("--> starting initializing shards on the new node"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10)); assertThat(clusterState.getRoutingNodes().node("A-1").size(), equalTo(2)); @@ -806,7 +806,7 @@ public void testUnassignedShardsWithUnbalancedZones() { assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); logger.info("--> start the shard (primary)"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(3)); assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(1)); // Unassigned shard is expected. @@ -867,11 +867,11 @@ public void testMultipleAwarenessAttributes() { assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); logger.info("--> start the shards (primaries)"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logger.info("--> all replicas are allocated and started since we have one node in each zone and rack"); assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(2)); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(0)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java index ba18bc0f6d42d..81ccdc35b248e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.routing.allocation; import com.carrotsearch.hppc.cursors.ObjectCursor; - import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.ArrayUtil; @@ -47,7 +46,6 @@ import java.util.stream.Collectors; -import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED; @@ -145,12 +143,10 @@ private ClusterState initCluster(AllocationService strategy) { clusterState = strategy.reroute(clusterState, "reroute"); logger.info("restart all the primary shards, replicas will start initializing"); - RoutingNodes routingNodes = clusterState.getRoutingNodes(); - clusterState = 
strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logger.info("start the replica shards"); - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logger.info("complete rebalancing"); return applyStartedShardsUntilNoChange(clusterState, strategy); @@ -185,12 +181,10 @@ private ClusterState removeNodes(ClusterState clusterState, AllocationService st } logger.info("start all the primary shards, replicas will start initializing"); - RoutingNodes routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logger.info("start the replica shards"); - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logger.info("rebalancing"); clusterState = strategy.reroute(clusterState, "reroute"); @@ -384,8 +378,7 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing strategy = createAllocationService(settings.build(), new TestGatewayAllocator()); logger.info("use the new allocator and check if it moves shards"); - routingNodes = clusterState.getRoutingNodes(); - routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable(); + routingTable = startInitializingShardsAndReroute(strategy, clusterState).routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); routingNodes = clusterState.getRoutingNodes(); for (RoutingNode routingNode : routingNodes) { @@ -395,7 +388,7 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing } logger.info("start the replica shards"); - routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable(); + routingTable = startInitializingShardsAndReroute(strategy, clusterState).routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); routingNodes = clusterState.getRoutingNodes(); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceUnbalancedClusterTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceUnbalancedClusterTests.java index 254ba81f93ce6..3f7321d2723a0 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceUnbalancedClusterTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceUnbalancedClusterTests.java @@ -21,6 +21,7 @@ import org.apache.lucene.util.TestUtil; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ESAllocationTestCase; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; @@ -66,11 +67,8 @@ protected ClusterState allocateNew(ClusterState state) { ClusterState clusterState = ClusterState.builder(state).metaData(metaData).routingTable(initialRoutingTable).build(); clusterState = 
strategy.reroute(clusterState, "reroute"); - while (true) { - if (clusterState.routingTable().shardsWithState(INITIALIZING).isEmpty()) { - break; - } - clusterState = strategy.applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING)); + while (clusterState.routingTable().shardsWithState(INITIALIZING).isEmpty() == false) { + clusterState = ESAllocationTestCase.startInitializingShardsAndReroute(strategy, clusterState); } Map<String, Integer> counts = new HashMap<>(); for (IndexShardRoutingTable table : clusterState.routingTable().index(index)) { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestCase.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestCase.java index db707c5478eff..0f28420e9325f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestCase.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestCase.java @@ -160,7 +160,7 @@ private ClusterState rebalance(ClusterState clusterState) { } logger.debug("Initializing shards: {}", initializing); numRelocations += initializing.size(); - clusterState = strategy.applyStartedShards(clusterState, initializing); + clusterState = ESAllocationTestCase.startShardsAndReroute(strategy, clusterState, initializing); } logger.debug("--> num relocations to get balance: {}", numRelocations); return clusterState; } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java index bca086b8fc92b..8eb056c77825c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java @@ -84,8 +84,7 @@ public void testAlways() { } logger.info("start all the primary shards for test1, replicas will start initializing"); - RoutingNodes routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test1"); for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) { assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2)); @@ -100,9 +99,7 @@ public void testAlways() { } logger.info("start the test1 replica shards"); - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)); - routingNodes = clusterState.getRoutingNodes(); + clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test1"); for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) { assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2)); @@ -121,7 +118,7 @@ public void testAlways() { .add(newNode("node3"))) .build(); clusterState = strategy.reroute(clusterState, "reroute"); - routingNodes = clusterState.getRoutingNodes(); + RoutingNodes routingNodes = clusterState.getRoutingNodes(); assertThat(routingNodes.node("node3").size(), equalTo(1)); assertThat(routingNodes.node("node3").iterator().next().shardId().getIndex().getName(), equalTo("test1")); @@ -163,9 +160,7 @@ public void testClusterPrimariesActive1() {
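Every hunk in these test files makes the same substitution, so it is worth sketching the shape of the ESAllocationTestCase helpers being called: a cluster-wide startInitializingShardsAndReroute, the per-index form used by ClusterRebalanceRoutingTests below, and the list-taking startShardsAndReroute. The bodies here are inferred from the call sites, and the class name and reroute reason string are placeholders; the point of the refactoring, as far as the diff shows, is that tests may no longer rely on applyStartedShards performing the follow-up reroute as a side effect, so the helpers issue it explicitly.

```java
import java.util.List;

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.AllocationService;

// Hypothetical sketch of the ESAllocationTestCase helpers used throughout this diff.
abstract class AllocationTestHelpersSketch {

    // Start every INITIALIZING shard in the cluster, then reroute explicitly.
    static ClusterState startInitializingShardsAndReroute(AllocationService allocationService, ClusterState clusterState) {
        return startShardsAndReroute(allocationService, clusterState,
                clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING));
    }

    // Per-index variant, e.g. startInitializingShardsAndReroute(strategy, clusterState, "test1").
    static ClusterState startInitializingShardsAndReroute(AllocationService allocationService, ClusterState clusterState,
                                                          String index) {
        return startShardsAndReroute(allocationService, clusterState,
                clusterState.getRoutingNodes().shardsWithState(index, ShardRoutingState.INITIALIZING));
    }

    // Start the given shards, then follow up with an explicit reroute, since starting
    // shards no longer implies one. The reason string is an assumption.
    static ClusterState startShardsAndReroute(AllocationService allocationService, ClusterState clusterState,
                                              List<ShardRouting> initializingShards) {
        return allocationService.reroute(allocationService.applyStartedShards(clusterState, initializingShards),
                "reroute after starting shards");
    }
}
```

Centralising the start-then-reroute sequence also removes the repeated RoutingNodes bookkeeping that the deleted lines in each hunk carried around.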
logger.info("start all the primary shards for test1, replicas will start initializing"); - RoutingNodes routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)); - routingNodes = clusterState.getRoutingNodes(); + clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test1"); for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) { assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2)); @@ -180,9 +175,7 @@ public void testClusterPrimariesActive1() { } logger.info("start the test1 replica shards"); - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)); - routingNodes = clusterState.getRoutingNodes(); + clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test1"); for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) { assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2)); @@ -197,9 +190,7 @@ public void testClusterPrimariesActive1() { } logger.info("start all the primary shards for test2, replicas will start initializing"); - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test2", INITIALIZING)); - routingNodes = clusterState.getRoutingNodes(); + clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test2"); for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) { assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2)); @@ -218,8 +209,8 @@ public void testClusterPrimariesActive1() { .add(newNode("node3"))) .build(); clusterState = strategy.reroute(clusterState, "reroute"); - routingNodes = clusterState.getRoutingNodes(); + RoutingNodes routingNodes = clusterState.getRoutingNodes(); assertThat(routingNodes.node("node3").size(), equalTo(1)); assertThat(routingNodes.node("node3").iterator().next().shardId().getIndex().getName(), equalTo("test1")); } @@ -260,9 +251,7 @@ public void testClusterPrimariesActive2() { } logger.info("start all the primary shards for test1, replicas will start initializing"); - RoutingNodes routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)); - routingNodes = clusterState.getRoutingNodes(); + clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test1"); for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) { assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2)); @@ -277,9 +266,7 @@ public void testClusterPrimariesActive2() { } logger.info("start the test1 replica shards"); - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)); - routingNodes = clusterState.getRoutingNodes(); + clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test1"); for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) { assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2)); @@ -298,8 +285,7 @@ public void testClusterPrimariesActive2() { .add(newNode("node3"))) .build(); clusterState = 
strategy.reroute(clusterState, "reroute"); - routingNodes = clusterState.getRoutingNodes(); - + RoutingNodes routingNodes = clusterState.getRoutingNodes(); assertThat(routingNodes.node("node3").isEmpty(), equalTo(true)); } @@ -339,9 +325,7 @@ public void testClusterAllActive1() { } logger.info("start all the primary shards for test1, replicas will start initializing"); - RoutingNodes routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)); - routingNodes = clusterState.getRoutingNodes(); + clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test1"); for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) { assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2)); @@ -356,9 +340,7 @@ public void testClusterAllActive1() { } logger.info("start the test1 replica shards"); - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)); - routingNodes = clusterState.getRoutingNodes(); + clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test1"); for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) { assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2)); @@ -373,9 +355,7 @@ public void testClusterAllActive1() { } logger.info("start all the primary shards for test2, replicas will start initializing"); - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test2", INITIALIZING)); - routingNodes = clusterState.getRoutingNodes(); + clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test2"); for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) { assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2)); @@ -390,9 +370,7 @@ public void testClusterAllActive1() { } logger.info("start the test2 replica shards"); - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test2", INITIALIZING)); - routingNodes = clusterState.getRoutingNodes(); + clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test2"); for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) { assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2)); @@ -411,7 +389,7 @@ public void testClusterAllActive1() { .add(newNode("node3"))) .build(); clusterState = strategy.reroute(clusterState, "reroute"); - routingNodes = clusterState.getRoutingNodes(); + RoutingNodes routingNodes = clusterState.getRoutingNodes(); assertThat(routingNodes.node("node3").size(), equalTo(1)); assertThat(routingNodes.node("node3").iterator().next().shardId().getIndex().getName(), anyOf(equalTo("test1"), @@ -454,9 +432,7 @@ public void testClusterAllActive2() { } logger.info("start all the primary shards for test1, replicas will start initializing"); - RoutingNodes routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)); - routingNodes = clusterState.getRoutingNodes(); + clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test1"); for (int i = 
0; i < clusterState.routingTable().index("test1").shards().size(); i++) { assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2)); @@ -471,9 +447,7 @@ public void testClusterAllActive2() { } logger.info("start the test1 replica shards"); - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)); - routingNodes = clusterState.getRoutingNodes(); + clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test1"); for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) { assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2)); @@ -492,7 +466,7 @@ public void testClusterAllActive2() { .add(newNode("node3"))) .build(); clusterState = strategy.reroute(clusterState, "reroute"); - routingNodes = clusterState.getRoutingNodes(); + RoutingNodes routingNodes = clusterState.getRoutingNodes(); assertThat(routingNodes.node("node3").isEmpty(), equalTo(true)); } @@ -533,9 +507,7 @@ public void testClusterAllActive3() { } logger.info("start all the primary shards for test1, replicas will start initializing"); - RoutingNodes routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)); - routingNodes = clusterState.getRoutingNodes(); + clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test1"); for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) { assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2)); @@ -550,9 +522,7 @@ public void testClusterAllActive3() { } logger.info("start the test1 replica shards"); - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)); - routingNodes = clusterState.getRoutingNodes(); + clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test1"); for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) { assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2)); @@ -567,9 +537,7 @@ public void testClusterAllActive3() { } logger.info("start all the primary shards for test2, replicas will start initializing"); - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test2", INITIALIZING)); - routingNodes = clusterState.getRoutingNodes(); + clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test2"); for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) { assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(2)); @@ -588,7 +556,7 @@ public void testClusterAllActive3() { .add(newNode("node3"))) .build(); clusterState = strategy.reroute(clusterState, "reroute"); - routingNodes = clusterState.getRoutingNodes(); + RoutingNodes routingNodes = clusterState.getRoutingNodes(); assertThat(routingNodes.node("node3").isEmpty(), equalTo(true)); } @@ -637,8 +605,7 @@ public void allocateUnassigned(RoutingAllocation allocation) { } logger.debug("start all the primary shards for test"); - RoutingNodes routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test", 
INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test"); for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) { assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(1)); assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED)); @@ -673,8 +640,7 @@ public void allocateUnassigned(RoutingAllocation allocation) { logger.debug("now start initializing shards and expect exactly one rebalance" + " from node1 to node 2 since index [test] is all on node1"); - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test1"); for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) { assertThat(clusterState.routingTable().index("test1").shard(i).shards().size(), equalTo(1)); @@ -736,8 +702,7 @@ public void allocateUnassigned(RoutingAllocation allocation) { } logger.debug("start all the primary shards for test"); - RoutingNodes routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test", INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test"); for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) { assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(1)); assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java index d2e36b7fac9b0..22ef1d03d7190 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java @@ -27,7 +27,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.common.settings.Settings; @@ -81,8 +80,7 @@ public void testClusterConcurrentRebalance() { } logger.info("start all the primary shards, replicas will start initializing"); - RoutingNodes routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) { assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2)); @@ -105,32 +103,28 @@ public void testClusterConcurrentRebalance() { } logger.info("start the replica shards, rebalancing should start, but, only 3 should be rebalancing"); - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); // we only allow one relocation at a time 
assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(7)); assertThat(clusterState.routingTable().shardsWithState(RELOCATING).size(), equalTo(3)); logger.info("finalize this session relocation, 3 more should relocate now"); - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); // we only allow one relocation at a time assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(7)); assertThat(clusterState.routingTable().shardsWithState(RELOCATING).size(), equalTo(3)); logger.info("finalize this session relocation, 2 more should relocate now"); - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); // we only allow one relocation at a time assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(8)); assertThat(clusterState.routingTable().shardsWithState(RELOCATING).size(), equalTo(2)); logger.info("finalize this session relocation, no more relocation"); - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); // we only allow one relocation at a time assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(10)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java index f9dee9807b400..7d9aeeb606111 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java @@ -67,9 +67,9 @@ public void testSimpleDeadNodeOnStartedPrimaryShard() { clusterState = allocation.reroute(clusterState, "reroute"); // starting primaries - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); // starting replicas - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); logger.info("--> verifying all is allocated"); assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1)); @@ -115,9 +115,9 @@ public void testDeadNodeWhileRelocatingOnToNode() { clusterState = allocation.reroute(clusterState, "reroute"); // starting primaries - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); // starting replicas - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); logger.info("--> verifying all is allocated"); assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1)); @@ -185,9 +185,9 @@ public void 
testDeadNodeWhileRelocatingOnFromNode() { clusterState = allocation.reroute(clusterState, "reroute"); // starting primaries - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); // starting replicas - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); logger.info("--> verifying all is allocated"); assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorTests.java index e46c899dbec91..1b4375364305b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.common.Priority; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -43,7 +44,7 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; -import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; +import static org.hamcrest.Matchers.equalTo; public class DiskThresholdMonitorTests extends ESAllocationTestCase { @@ -71,16 +72,18 @@ public void testMarkFloodStageIndicesReadOnly() { .add(newNode("node2"))).build(); clusterState = allocation.reroute(clusterState, "reroute"); logger.info("start primary shard"); - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); ClusterState finalState = clusterState; AtomicBoolean reroute = new AtomicBoolean(false); AtomicReference> indices = new AtomicReference<>(); AtomicLong currentTime = new AtomicLong(); DiskThresholdMonitor monitor = new DiskThresholdMonitor(settings, () -> finalState, - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null, currentTime::get, (reason, listener) -> { - assertTrue(reroute.compareAndSet(false, true)); - listener.onResponse(null); - }) { + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null, currentTime::get, + (reason, priority, listener) -> { + assertTrue(reroute.compareAndSet(false, true)); + assertThat(priority, equalTo(Priority.HIGH)); + listener.onResponse(null); + }) { @Override protected void markIndicesReadOnly(Set indicesToMarkReadOnly, ActionListener listener) { assertTrue(indices.compareAndSet(null, indicesToMarkReadOnly)); @@ -117,10 +120,12 @@ protected void markIndicesReadOnly(Set indicesToMarkReadOnly, ActionList assertTrue(anotherFinalClusterState.blocks().indexBlocked(ClusterBlockLevel.WRITE, "test_2")); monitor = new DiskThresholdMonitor(settings, () -> anotherFinalClusterState, - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null, currentTime::get, (reason, listener) -> { 
-                assertTrue(reroute.compareAndSet(false, true));
-                listener.onResponse(null);
-            }) {
+            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null, currentTime::get,
+            (reason, priority, listener) -> {
+                assertTrue(reroute.compareAndSet(false, true));
+                assertThat(priority, equalTo(Priority.HIGH));
+                listener.onResponse(null);
+            }) {
             @Override
             protected void markIndicesReadOnly(Set indicesToMarkReadOnly, ActionListener listener) {
                 assertTrue(indices.compareAndSet(null, indicesToMarkReadOnly));
@@ -144,10 +149,12 @@ public void testDoesNotSubmitRerouteTaskTooFrequently() {
         AtomicLong currentTime = new AtomicLong();
         AtomicReference> listenerReference = new AtomicReference<>();
         DiskThresholdMonitor monitor = new DiskThresholdMonitor(Settings.EMPTY, () -> clusterState,
-            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null, currentTime::get, (reason, listener) -> {
-                assertNotNull(listener);
-                assertTrue(listenerReference.compareAndSet(null, listener));
-            }) {
+            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null, currentTime::get,
+            (reason, priority, listener) -> {
+                assertNotNull(listener);
+                assertThat(priority, equalTo(Priority.HIGH));
+                assertTrue(listenerReference.compareAndSet(null, listener));
+            }) {
             @Override
             protected void markIndicesReadOnly(Set indicesToMarkReadOnly, ActionListener listener) {
                 throw new AssertionError("unexpected");
diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java
index 74e7ac3273634..23c8d341c13e9 100644
--- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java
@@ -32,7 +32,6 @@
 import org.elasticsearch.cluster.routing.RoutingTable;
 import org.elasticsearch.common.settings.Settings;
-import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
 import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.not;
@@ -63,16 +62,14 @@ public void testElectReplicaAsPrimaryDuringRelocation() {
         clusterState = strategy.reroute(clusterState, "reroute");
         logger.info("Start the primary shards");
-        RoutingNodes routingNodes = clusterState.getRoutingNodes();
-        clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+        clusterState = startInitializingShardsAndReroute(strategy, clusterState);
         logger.info("Start the replica shards");
-        routingNodes = clusterState.getRoutingNodes();
-        ClusterState resultingState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+        ClusterState resultingState = startInitializingShardsAndReroute(strategy, clusterState);
         assertThat(resultingState, not(equalTo(clusterState)));
         clusterState = resultingState;
-        routingNodes = clusterState.getRoutingNodes();
+        RoutingNodes routingNodes = clusterState.getRoutingNodes();
         assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(2));
         assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(2));
         assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(2));
diff --git
a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java index e649b8f6c180b..eb1385ec35210 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java @@ -28,7 +28,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; @@ -36,7 +35,6 @@ import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.elasticsearch.common.settings.Settings; -import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -78,8 +76,7 @@ public Long getShardSize(ShardRouting shardRouting) { assertEquals(byteSize, clusterState.getRoutingTable() .shardsWithState(ShardRoutingState.INITIALIZING).get(0).getExpectedShardSize()); logger.info("Start the primary shard"); - RoutingNodes routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertEquals(1, clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(ShardRoutingState.STARTED)); assertEquals(1, clusterState.getRoutingNodes().unassigned().size()); @@ -122,7 +119,7 @@ public Long getShardSize(ShardRouting shardRouting) { clusterState = allocation.reroute(clusterState, "reroute"); logger.info("start primary shard"); - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); logger.info("move the shard"); String existingNodeId = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(); @@ -144,7 +141,7 @@ public Long getShardSize(ShardRouting shardRouting) { assertEquals(clusterState.getRoutingNodes().node(toNodeId).iterator().next().getExpectedShardSize(), byteSize); logger.info("finish moving the shard"); - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); assertThat(clusterState.getRoutingNodes().node(existingNodeId).isEmpty(), equalTo(true)); assertThat(clusterState.getRoutingNodes().node(toNodeId).iterator().next().state(), equalTo(ShardRoutingState.STARTED)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java index 6f8f75d48fe37..c3587240a7262 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java @@ -88,14 +88,11 @@ public void testSimpleFailedNodeTest() { clusterState = 
strategy.reroute(clusterState, "reroute"); logger.info("start all the primary shards, replicas will start initializing"); - RoutingNodes routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); - routingNodes = clusterState.getRoutingNodes(); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logger.info("start the replica shards"); - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); - routingNodes = clusterState.getRoutingNodes(); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + RoutingNodes routingNodes = clusterState.getRoutingNodes(); assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(1)); assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(1)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java index 889be132d45d9..6c353aa8c85e9 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.routing.allocation; import com.carrotsearch.hppc.cursors.ObjectCursor; - import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; @@ -41,7 +40,6 @@ import org.elasticsearch.test.VersionUtils; import java.util.ArrayList; -import java.util.Collections; import java.util.HashSet; import java.util.Set; @@ -84,9 +82,9 @@ public void testFailedShardPrimaryRelocatingToAndFrom() { clusterState = allocation.reroute(clusterState, "reroute"); // starting primaries - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); // starting replicas - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); logger.info("--> verifying all is allocated"); assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1)); @@ -170,10 +168,7 @@ public void testFailPrimaryStartedCheckReplicaElected() { clusterState = strategy.reroute(clusterState, "reroute"); logger.info("Start the shards (primaries)"); - RoutingNodes routingNodes = clusterState.getRoutingNodes(); - - - ClusterState newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + ClusterState newState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; @@ -191,8 +186,7 @@ public void testFailPrimaryStartedCheckReplicaElected() { } logger.info("Start the shards (backups)"); - routingNodes = clusterState.getRoutingNodes(); - newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + newState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; @@ -305,10 +299,7 @@ public void testSingleShardMultipleAllocationFailures() { 
} clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build(); while (!clusterState.routingTable().shardsWithState(UNASSIGNED).isEmpty()) { - // start all initializing - clusterState = strategy.applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING)); - // and assign more unassigned - clusterState = strategy.reroute(clusterState, "reroute"); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); } int shardsToFail = randomIntBetween(1, numberOfReplicas); @@ -425,8 +416,7 @@ public void testRebalanceFailure() { clusterState = strategy.reroute(clusterState, "reroute"); logger.info("Start the shards (primaries)"); - RoutingNodes routingNodes = clusterState.getRoutingNodes(); - ClusterState newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + ClusterState newState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; @@ -444,8 +434,7 @@ public void testRebalanceFailure() { } logger.info("Start the shards (backups)"); - routingNodes = clusterState.getRoutingNodes(); - newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + newState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; @@ -468,7 +457,7 @@ public void testRebalanceFailure() { newState = strategy.reroute(clusterState, "reroute"); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; - routingNodes = clusterState.getRoutingNodes(); + RoutingNodes routingNodes = clusterState.getRoutingNodes(); assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(2)); assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED, RELOCATING), equalTo(2)); @@ -519,13 +508,12 @@ public void testFailAllReplicasInitializingOnPrimaryFail() { assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(2)); // start primary shards - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(2)); // start one replica so it can take over. 
- clusterState = allocation.applyStartedShards(clusterState, - Collections.singletonList(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).get(0))); + clusterState = startShardsAndReroute(allocation, clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING).get(0)); assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(2)); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); ShardRouting startedReplica = clusterState.getRoutingNodes().activeReplicaWithHighestVersion(shardId); @@ -567,13 +555,12 @@ public void testFailAllReplicasInitializingOnPrimaryFailWhileHavingAReplicaToEle assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(2)); // start primary shards - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(2)); // start another replica shard, while keep one initializing - clusterState = allocation.applyStartedShards(clusterState, - Collections.singletonList(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).get(0))); + clusterState = startShardsAndReroute(allocation, clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING).get(0)); assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(2)); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); @@ -613,7 +600,7 @@ public void testReplicaOnNewestVersionIsPromoted() { assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(3)); // start primary shard - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(3)); @@ -627,7 +614,7 @@ public void testReplicaOnNewestVersionIsPromoted() { clusterState = allocation.reroute(clusterState, "reroute"); assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(2)); assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(2)); @@ -643,7 +630,7 @@ public void testReplicaOnNewestVersionIsPromoted() { clusterState = allocation.reroute(clusterState, "reroute"); assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(2)); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(2)); - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = 
startInitializingShardsAndReroute(allocation, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(4)); assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(0)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java index 86e8887688ff2..4d0fb77d90451 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java @@ -194,10 +194,10 @@ private void testClusterFilters(Settings.Builder allocationServiceSettings, Disc assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(2)); logger.info("--> start the shards (primaries)"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logger.info("--> start the shards (replicas)"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logger.info("--> make sure shards are only allocated on tag1 with value1 and value2"); final List startedShards = clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED); @@ -297,10 +297,10 @@ private void testIndexFilters(Settings.Builder initialIndexSettings, Settings.Bu assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(2)); logger.info("--> start the shards (primaries)"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logger.info("--> start the shards (replicas)"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logger.info("--> make sure shards are only allocated on tag1 with value1 and value2"); List startedShards = clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED); @@ -323,7 +323,7 @@ private void testIndexFilters(Settings.Builder initialIndexSettings, Settings.Bu assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(2)); logger.info("--> finish relocation"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); startedShards = clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED); assertThat(startedShards.size(), equalTo(4)); @@ -358,7 +358,7 @@ public void testConcurrentRecoveriesAfterShardsCannotRemainOnNode() { assertThat(clusterState.getRoutingNodes().node(node2.getId()).numberOfShardsWithState(INITIALIZING), equalTo(2)); logger.info("--> start the shards (only primaries)"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logger.info("--> make sure all shards are started"); 
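// The hunks in this region also use narrower overloads of the same helper: scoped to one index
// (startInitializingShardsAndReroute(strategy, clusterState, "test1")), to one node
// (startInitializingShardsAndReroute(strategy, clusterState, routingNodes.node("node1")), seen
// further below), and to explicit shards (startShardsAndReroute(allocation, clusterState, shard),
// which replaces the Collections.singletonList(...) call sites). The methods below would sit on
// the StartShardsHelperSketch class from the earlier note; like it, they are inferred from the
// call sites rather than quoted from the PR, each delegating to the list-based startShardsAndReroute.
import java.util.Arrays;

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.AllocationService;

// start only the INITIALIZING shards of the named index
static ClusterState startInitializingShardsAndReroute(AllocationService allocationService, ClusterState clusterState,
                                                      String index) {
    return startShardsAndReroute(allocationService, clusterState,
        clusterState.routingTable().index(index).shardsWithState(ShardRoutingState.INITIALIZING));
}

// start only the INITIALIZING shards currently assigned to the given node
static ClusterState startInitializingShardsAndReroute(AllocationService allocationService, ClusterState clusterState,
                                                      RoutingNode routingNode) {
    return startShardsAndReroute(allocationService, clusterState, routingNode.shardsWithState(ShardRoutingState.INITIALIZING));
}

// varargs convenience so a single shard can be started without wrapping it in a singleton list
static ClusterState startShardsAndReroute(AllocationService allocationService, ClusterState clusterState,
                                          ShardRouting... initializingShards) {
    return startShardsAndReroute(allocationService, clusterState, Arrays.asList(initializingShards));
}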
assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(4)); @@ -377,7 +377,7 @@ public void testConcurrentRecoveriesAfterShardsCannotRemainOnNode() { assertThat(clusterState.getRoutingNodes().node(node2.getId()).numberOfShardsWithState(STARTED), equalTo(2)); logger.info("--> start the shards (only primaries)"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logger.info("--> move second shard from node1 to node2"); clusterState = strategy.reroute(clusterState, "reroute"); @@ -385,7 +385,7 @@ public void testConcurrentRecoveriesAfterShardsCannotRemainOnNode() { assertThat(clusterState.getRoutingNodes().node(node2.getId()).numberOfShardsWithState(STARTED), equalTo(3)); logger.info("--> start the shards (only primaries)"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); clusterState = strategy.reroute(clusterState, "reroute"); assertThat(clusterState.getRoutingNodes().node(node2.getId()).numberOfShardsWithState(STARTED), equalTo(4)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/InSyncAllocationIdTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/InSyncAllocationIdTests.java index 5e1ff70d9d68e..8ab5d7bcbade0 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/InSyncAllocationIdTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/InSyncAllocationIdTests.java @@ -43,7 +43,6 @@ import java.util.List; import java.util.Set; -import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED; import static org.hamcrest.Matchers.equalTo; @@ -58,7 +57,8 @@ public class InSyncAllocationIdTests extends ESAllocationTestCase { @Before public void setupAllocationService() { allocation = createAllocationService(); - failedClusterStateTaskExecutor = new ShardStateAction.ShardFailedClusterStateTaskExecutor(allocation, null, logger); + failedClusterStateTaskExecutor + = new ShardStateAction.ShardFailedClusterStateTaskExecutor(allocation, null, logger); } public void testInSyncAllocationIdsUpdated() { @@ -84,7 +84,7 @@ public void testInSyncAllocationIdsUpdated() { assertThat(clusterState.metaData().index("test-old").inSyncAllocationIds(0), equalTo(new HashSet<>(Arrays.asList("x", "y")))); logger.info("start primary shard"); - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); assertThat(clusterState.getRoutingTable().shardsWithState(STARTED).size(), equalTo(1)); assertThat(clusterState.metaData().index("test").inSyncAllocationIds(0).size(), equalTo(1)); @@ -93,7 +93,7 @@ public void testInSyncAllocationIdsUpdated() { assertThat(clusterState.metaData().index("test-old").inSyncAllocationIds(0), equalTo(new HashSet<>(Arrays.asList("x", "y")))); logger.info("start replica shards"); - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, 
clusterState); assertThat(clusterState.metaData().index("test").inSyncAllocationIds(0).size(), equalTo(3)); @@ -128,7 +128,7 @@ public void testInSyncAllocationIdsUpdated() { assertThat(clusterState.metaData().index("test").inSyncAllocationIds(0).size(), equalTo(0)); logger.info("start primary shard"); - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); assertThat(clusterState.getRoutingTable().shardsWithState(STARTED).size(), equalTo(1)); assertThat(clusterState.metaData().index("test").inSyncAllocationIds(0).size(), equalTo(1)); @@ -242,7 +242,7 @@ public void testInSyncIdsNotGrowingWithoutBounds() throws Exception { clusterState = allocation.reroute(clusterState, "reroute"); logger.info("start replica shards"); - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); logger.info("remove the node"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) @@ -305,7 +305,7 @@ public void testInSyncIdsNotTrimmedWhenNotGrowing() throws Exception { assertEquals(inSyncSet, clusterState.metaData().index("test").inSyncAllocationIds(0)); logger.info("start primary shard"); - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); // in-sync allocation ids should not be updated assertEquals(inSyncSet, clusterState.metaData().index("test").inSyncAllocationIds(0)); } @@ -360,7 +360,7 @@ private ClusterState createOnePrimaryOneReplicaClusterState(AllocationService al assertThat(clusterState.metaData().index("test").inSyncAllocationIds(0).size(), equalTo(0)); logger.info("start primary shard"); - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); assertThat(clusterState.getRoutingTable().shardsWithState(STARTED).size(), equalTo(1)); assertThat(clusterState.metaData().index("test").inSyncAllocationIds(0).size(), equalTo(1)); @@ -368,7 +368,7 @@ private ClusterState createOnePrimaryOneReplicaClusterState(AllocationService al equalTo(clusterState.metaData().index("test").inSyncAllocationIds(0).iterator().next())); logger.info("start replica shard"); - clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); assertThat(clusterState.metaData().index("test").inSyncAllocationIds(0).size(), equalTo(2)); return clusterState; } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java index a44f4c4f913cf..686dcc0643ae5 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java @@ -105,8 +105,7 @@ public void testBalanceAllNodesStarted() { newState = strategy.reroute(clusterState, "reroute"); assertThat(newState, equalTo(clusterState)); - RoutingNodes routingNodes = 
clusterState.getRoutingNodes(); - newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + newState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(3)); @@ -126,11 +125,10 @@ public void testBalanceAllNodesStarted() { assertThat(newState, equalTo(clusterState)); logger.info("Start the more shards"); - routingNodes = clusterState.getRoutingNodes(); - newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + newState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; - routingNodes = clusterState.getRoutingNodes(); + RoutingNodes routingNodes = clusterState.getRoutingNodes(); assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(3)); for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) { @@ -226,8 +224,7 @@ public void testBalanceIncrementallyStartNodes() { logger.info("Start the primary shard"); - RoutingNodes routingNodes = clusterState.getRoutingNodes(); - newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + newState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(3)); @@ -247,13 +244,11 @@ public void testBalanceIncrementallyStartNodes() { assertThat(newState, equalTo(clusterState)); logger.info("Start the backup shard"); - routingNodes = clusterState.getRoutingNodes(); - newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + newState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; - routingNodes = clusterState.getRoutingNodes(); - assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(3)); + assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(3)); for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) { assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2)); assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2)); @@ -282,20 +277,18 @@ public void testBalanceIncrementallyStartNodes() { assertThat(newState, equalTo(clusterState)); logger.info("Start the backup shard"); - routingNodes = clusterState.getRoutingNodes(); - newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + newState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; - routingNodes = clusterState.getRoutingNodes(); - assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(3)); + assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(3)); - newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + newState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; - routingNodes = clusterState.getRoutingNodes(); + RoutingNodes routingNodes = clusterState.getRoutingNodes(); - 
assertThat(clusterState.routingTable().index("test1").shards().size(), equalTo(3)); + assertThat(clusterState.routingTable().index("test1").shards().size(), equalTo(3)); assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(4)); assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(4)); @@ -360,8 +353,7 @@ public void testBalanceAllNodesStartedAddIndex() { assertThat(newState, equalTo(clusterState)); - RoutingNodes routingNodes = clusterState.getRoutingNodes(); - newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + newState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(3)); @@ -381,11 +373,10 @@ public void testBalanceAllNodesStartedAddIndex() { assertThat(newState, equalTo(clusterState)); logger.info("Start the more shards"); - routingNodes = clusterState.getRoutingNodes(); - newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + newState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; - routingNodes = clusterState.getRoutingNodes(); + RoutingNodes routingNodes = clusterState.getRoutingNodes(); assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(3)); for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) { assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2)); @@ -436,8 +427,7 @@ public void testBalanceAllNodesStartedAddIndex() { assertThat(newState, equalTo(clusterState)); - routingNodes = clusterState.getRoutingNodes(); - newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + newState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; assertThat(clusterState.routingTable().index("test1").shards().size(), equalTo(3)); @@ -457,8 +447,7 @@ public void testBalanceAllNodesStartedAddIndex() { assertThat(newState, equalTo(clusterState)); logger.info("Start the more shards"); - routingNodes = clusterState.getRoutingNodes(); - newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + newState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; routingNodes = clusterState.getRoutingNodes(); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java index 2ce0b7b89bec2..19af72d18db4a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java @@ -72,7 +72,7 @@ private ClusterState createInitialClusterState() { clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))) .build(); RoutingTable prevRoutingTable = routingTable; - routingTable = strategy.reroute(clusterState, "reroute", false).routingTable(); + routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState 
= ClusterState.builder(clusterState).routingTable(routingTable).build(); assertEquals(prevRoutingTable.index("idx").shards().size(), 1); @@ -204,7 +204,7 @@ public void testFailedAllocation() { Settings.builder().put(clusterState.metaData().index("idx").getSettings()).put("index.allocation.max_retries", retries+1).build() ).build(), true).build()).build(); - ClusterState newState = strategy.reroute(clusterState, "settings changed", false); + ClusterState newState = strategy.reroute(clusterState, "settings changed"); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; routingTable = newState.routingTable(); @@ -219,8 +219,7 @@ public void testFailedAllocation() { routingTable.index("idx").shard(0).shards().get(0), null, new RoutingAllocation(null, null, clusterState, null, 0))); // now we start the shard - clusterState = strategy.applyStartedShards(clusterState, Collections.singletonList( - routingTable.index("idx").shard(0).shards().get(0))); + clusterState = startShardsAndReroute(strategy, clusterState, routingTable.index("idx").shard(0).shards().get(0)); routingTable = clusterState.routingTable(); // all counters have been reset to 0 ie. no unassigned info diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java index f0d25175f6e43..e4b215cf2e25c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java @@ -122,8 +122,7 @@ public void testDoNotAllocateFromPrimary() { } logger.info("start all the primary shards, replicas will start initializing"); - RoutingNodes routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) { assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(3)); @@ -132,8 +131,7 @@ public void testDoNotAllocateFromPrimary() { assertThat(clusterState.routingTable().index("test").shard(i).replicaShardsWithState(UNASSIGNED).size(), equalTo(1)); } - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) { assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(3)); @@ -167,8 +165,7 @@ public void testDoNotAllocateFromPrimary() { assertThat(clusterState.routingTable().index("test").shard(i).replicaShardsWithState(INITIALIZING).size(), equalTo(1)); } - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) { assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(3)); @@ -396,7 +393,7 @@ private ClusterState stabilize(ClusterState clusterState, AllocationService serv boolean 
changed; do { logger.trace("RoutingNodes: {}", clusterState.getRoutingNodes()); - ClusterState newState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + ClusterState newState = startInitializingShardsAndReroute(service, clusterState); changed = newState.equals(clusterState) == false; clusterState = newState; routingNodes = clusterState.getRoutingNodes(); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java index d80feedabf0e6..34c925bc0bcac 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java @@ -74,8 +74,8 @@ public void testPreferLocalPrimaryAllocationOverFiltered() { clusterState = strategy.reroute(clusterState, "reroute"); - while (!clusterState.getRoutingNodes().shardsWithState(INITIALIZING).isEmpty()) { - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + while (clusterState.getRoutingNodes().shardsWithState(INITIALIZING).isEmpty() == false) { + clusterState = startInitializingShardsAndReroute(strategy, clusterState); } logger.info("remove one of the nodes and apply filter to move everything from another node"); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java index d54d798544c84..34052b4c79132 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java @@ -65,8 +65,8 @@ public void testPreferPrimaryAllocationOverReplicas() { .add(newNode("node1")).add(newNode("node2"))).build(); clusterState = strategy.reroute(clusterState, "reroute"); - while (!clusterState.getRoutingNodes().shardsWithState(INITIALIZING).isEmpty()) { - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + while (clusterState.getRoutingNodes().shardsWithState(INITIALIZING).isEmpty() == false) { + clusterState = startInitializingShardsAndReroute(strategy, clusterState); } logger.info("increasing the number of replicas to 1, and perform a reroute (to get the replicas allocation going)"); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java index f06a38a2ba462..e9a7afe6be3c6 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java @@ -67,11 +67,11 @@ public void testBackupElectionToPrimaryWhenPrimaryCanBeAllocatedToAnotherNode() logger.info("Start the primary shard (on node1)"); RoutingNodes routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, 
clusterState, routingNodes.node("node1")); logger.info("Start the backup shard (on node2)"); routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.node("node2").shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState, routingNodes.node("node2")); logger.info("Adding third node and reroute and kill first node"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) @@ -115,9 +115,8 @@ public void testRemovingInitializingReplicasIfPrimariesFails() { clusterState = allocation.reroute(clusterState, "reroute"); logger.info("Start the primary shards"); + clusterState = startInitializingShardsAndReroute(allocation, clusterState); RoutingNodes routingNodes = clusterState.getRoutingNodes(); - clusterState = allocation.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); - routingNodes = clusterState.getRoutingNodes(); assertThat(routingNodes.shardsWithState(STARTED).size(), equalTo(2)); assertThat(routingNodes.shardsWithState(INITIALIZING).size(), equalTo(2)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java index a3fddda7b6e33..e53638bdca485 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java @@ -64,7 +64,7 @@ public void testPrimaryNotRelocatedWhileBeingRecoveredFrom() { logger.info("Start the primary shard (on node1)"); RoutingNodes routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState, routingNodes.node("node1")); assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(5)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java index 0c8ebff594598..e7f46871b60e0 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java @@ -135,7 +135,7 @@ public void testRandomDecisions() { clusterState = strategy.reroute(clusterState, "reroute"); } if (clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size() > 0) { - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); } } logger.info("Fill up nodes such that every shard can be allocated"); @@ -158,7 +158,7 @@ public void testRandomDecisions() { iterations++; clusterState = strategy.reroute(clusterState, "reroute"); if (clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size() > 0) { - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); } } while 
(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size() != 0 || diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java index ff54fa06095f2..33c7b9afce6ea 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java @@ -99,9 +99,7 @@ public Long getShardSize(ShardRouting shardRouting) { } logger.info("start all the primary shards, replicas will start initializing"); - RoutingNodes routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); - routingNodes = clusterState.getRoutingNodes(); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) { assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2)); @@ -116,7 +114,6 @@ public Long getShardSize(ShardRouting shardRouting) { .add(newNode("node7")).add(newNode("node8")).add(newNode("node9")).add(newNode("node10"))) .build(); clusterState = strategy.reroute(clusterState, "reroute"); - routingNodes = clusterState.getRoutingNodes(); for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) { assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2)); @@ -127,9 +124,7 @@ public Long getShardSize(ShardRouting shardRouting) { } logger.info("start the replica shards, rebalancing should start"); - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); - routingNodes = clusterState.getRoutingNodes(); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); // we only allow one relocation at a time assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(5)); @@ -146,9 +141,7 @@ public Long getShardSize(ShardRouting shardRouting) { } logger.info("complete relocation, other half of relocation should happen"); - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); - routingNodes = clusterState.getRoutingNodes(); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); // we now only relocate 3, since 2 remain where they are! 
assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(7)); @@ -163,9 +156,8 @@ public Long getShardSize(ShardRouting shardRouting) { logger.info("complete relocation, that's it!"); - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); - routingNodes = clusterState.getRoutingNodes(); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + RoutingNodes routingNodes = clusterState.getRoutingNodes(); assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(10)); // make sure we have an even relocation diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java index 5e61b35b5ec48..3625558178d29 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java @@ -89,9 +89,7 @@ public void testBackupIsAllocatedAfterPrimary() { logger.info("Start all the primary shards"); RoutingNodes routingNodes = clusterState.getRoutingNodes(); prevRoutingTable = routingTable; - routingTable = strategy.applyStartedShards(clusterState, - routingNodes.node(nodeHoldingPrimary).shardsWithState(INITIALIZING)).routingTable(); - clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); + routingTable = startInitializingShardsAndReroute(strategy, clusterState, routingNodes.node(nodeHoldingPrimary)).routingTable(); final String nodeHoldingReplica = routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(); assertThat(nodeHoldingPrimary, not(equalTo(nodeHoldingReplica))); assertThat(prevRoutingTable != routingTable, equalTo(true)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java index c64bc51bd5b7c..d6da7ec0a533f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java @@ -40,7 +40,6 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.gateway.TestGatewayAllocator; -import java.util.Arrays; import java.util.Collections; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; @@ -61,10 +60,6 @@ public void setUp() throws Exception { } private ClusterState createInitialClusterState(boolean startShards) { - return createInitialClusterState(startShards, Version.CURRENT); - } - - private ClusterState createInitialClusterState(boolean startShards, Version nodeVersion) { MetaData.Builder metaBuilder = MetaData.builder(); metaBuilder.put(IndexMetaData.builder("source").settings(settings(Version.CURRENT)) .numberOfShards(2).numberOfReplicas(0).setRoutingNumShards(16)); @@ -75,11 +70,11 @@ private ClusterState createInitialClusterState(boolean startShards, Version node RoutingTable routingTable = routingTableBuilder.build(); ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData).routingTable(routingTable).build(); - clusterState = 
ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1", nodeVersion)).add(newNode - ("node2", nodeVersion))) + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1", Version.CURRENT)).add(newNode + ("node2", Version.CURRENT))) .build(); RoutingTable prevRoutingTable = routingTable; - routingTable = strategy.reroute(clusterState, "reroute", false).routingTable(); + routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); assertEquals(prevRoutingTable.index("source").shards().size(), 2); @@ -94,9 +89,9 @@ private ClusterState createInitialClusterState(boolean startShards, Version node if (startShards) { - clusterState = strategy.applyStartedShards(clusterState, - Arrays.asList(routingTable.index("source").shard(0).shards().get(0), - routingTable.index("source").shard(1).shards().get(0))); + clusterState = startShardsAndReroute(strategy, clusterState, + routingTable.index("source").shard(0).shards().get(0), + routingTable.index("source").shard(1).shards().get(0)); routingTable = clusterState.routingTable(); assertEquals(routingTable.index("source").shards().size(), 2); assertEquals(routingTable.index("source").shard(0).shards().get(0).state(), STARTED); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RetryFailedAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RetryFailedAllocationTests.java index 1ca516da26286..59ee72ddbd6e5 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RetryFailedAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RetryFailedAllocationTests.java @@ -69,7 +69,7 @@ private ShardRouting getReplica() { public void testRetryFailedResetForAllocationCommands() { final int retries = MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY.get(Settings.EMPTY); clusterState = strategy.reroute(clusterState, "initial allocation"); - clusterState = strategy.applyStartedShards(clusterState, Collections.singletonList(getPrimary())); + clusterState = startShardsAndReroute(strategy, clusterState, getPrimary()); // Exhaust all replica allocation attempts with shard failures for (int i = 0; i < retries; i++) { @@ -90,7 +90,7 @@ public void testRetryFailedResetForAllocationCommands() { clusterState = result.getClusterState(); assertEquals(ShardRoutingState.INITIALIZING, getReplica().state()); - clusterState = strategy.applyStartedShards(clusterState, Collections.singletonList(getReplica())); + clusterState = startShardsAndReroute(strategy, clusterState, getReplica()); assertEquals(ShardRoutingState.STARTED, getReplica().state()); assertFalse(clusterState.getRoutingNodes().hasUnassignedShards()); } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java index 72364e3fbc925..3d52ae1087218 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java @@ -84,16 +84,14 @@ public void testBalanceAllNodesStarted() { clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())).build(); clusterState = strategy.reroute(clusterState, "reroute"); 
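// The change just below is the canonical shape of this whole patch: the recurring two-step idiom
//
//     RoutingNodes routingNodes = clusterState.getRoutingNodes();
//     clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
//
// collapses into a single helper call,
//
//     clusterState = startInitializingShardsAndReroute(strategy, clusterState);
//
// which starts the shards and then performs a follow-up reroute in one step; presumably the
// explicit reroute is needed because applyStartedShards no longer reroutes implicitly.
// A sketch of these helpers, inferred from the call sites in this patch, appears after the
// final hunk.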
- routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logger.info("Reroute, nothing should change"); ClusterState newState = strategy.reroute(clusterState, "reroute"); assertThat(newState, equalTo(clusterState)); logger.info("Start the more shards"); - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); routingNodes = clusterState.getRoutingNodes(); assertThat(assertShardStats(routingNodes), equalTo(true)); @@ -101,8 +99,7 @@ public void testBalanceAllNodesStarted() { assertThat(routingNodes.hasInactivePrimaries(), equalTo(false)); assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false)); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); - + startInitializingShardsAndReroute(strategy, clusterState); } public void testBalanceIncrementallyStartNodes() { @@ -135,19 +132,15 @@ public void testBalanceIncrementallyStartNodes() { clusterState = strategy.reroute(clusterState, "reroute"); logger.info("Start the primary shard"); - RoutingNodes routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logger.info("Reroute, nothing should change"); clusterState = strategy.reroute(clusterState, "reroute"); logger.info("Start the backup shard"); - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); - routingNodes = clusterState.getRoutingNodes(); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); - routingNodes = clusterState.getRoutingNodes(); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logger.info("Add another node and perform rerouting, nothing will happen since primary not started"); clusterState = ClusterState.builder(clusterState) @@ -159,18 +152,16 @@ public void testBalanceIncrementallyStartNodes() { assertThat(newState, equalTo(clusterState)); logger.info("Start the backup shard"); - routingNodes = clusterState.getRoutingNodes(); - newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + newState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; - routingNodes = clusterState.getRoutingNodes(); assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(3)); - newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + newState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; - routingNodes = clusterState.getRoutingNodes(); + RoutingNodes routingNodes = clusterState.getRoutingNodes(); assertThat(clusterState.routingTable().index("test1").shards().size(), equalTo(3)); @@ -233,7 +224,7 @@ public void testBalanceAllNodesStartedAddIndex() { 
assertThat(routingNodes.node("node2").numberOfShardsWithState(INITIALIZING), equalTo(1)); assertThat(routingNodes.node("node3").numberOfShardsWithState(INITIALIZING), equalTo(1)); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); routingNodes = clusterState.getRoutingNodes(); assertThat(assertShardStats(routingNodes), equalTo(true)); @@ -249,8 +240,7 @@ public void testBalanceAllNodesStartedAddIndex() { assertThat(newState, equalTo(clusterState)); logger.info("Start the more shards"); - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); routingNodes = clusterState.getRoutingNodes(); assertThat(assertShardStats(routingNodes), equalTo(true)); @@ -302,8 +292,7 @@ public void testBalanceAllNodesStartedAddIndex() { logger.info("Reroute, start the primaries"); - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); routingNodes = clusterState.getRoutingNodes(); assertThat(assertShardStats(routingNodes), equalTo(true)); @@ -312,8 +301,7 @@ public void testBalanceAllNodesStartedAddIndex() { assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false)); logger.info("Reroute, start the replicas"); - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); routingNodes = clusterState.getRoutingNodes(); assertThat(assertShardStats(routingNodes), equalTo(true)); @@ -344,8 +332,7 @@ public void testBalanceAllNodesStartedAddIndex() { assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false)); logger.info("Start Recovering shards round 1"); - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); routingNodes = clusterState.getRoutingNodes(); assertThat(assertShardStats(routingNodes), equalTo(true)); @@ -354,8 +341,7 @@ public void testBalanceAllNodesStartedAddIndex() { assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false)); logger.info("Start Recovering shards round 2"); - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); routingNodes = clusterState.getRoutingNodes(); assertThat(assertShardStats(routingNodes), equalTo(true)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java index 9856bd064ca72..254d65a0e5b46 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java @@ -80,7 +80,7 @@ public void testSameHost() { assertThat(numberOfShardsOfType(clusterState.getRoutingNodes(), ShardRoutingState.INITIALIZING), 
equalTo(2)); logger.info("--> start all primary shards, no replica will be started since its on the same host"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(numberOfShardsOfType(clusterState.getRoutingNodes(), ShardRoutingState.STARTED), equalTo(2)); assertThat(numberOfShardsOfType(clusterState.getRoutingNodes(), ShardRoutingState.INITIALIZING), equalTo(0)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java index fb3f75aad5f21..323483f45d233 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java @@ -23,14 +23,13 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ESAllocationTestCase; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.cluster.ESAllocationTestCase; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; @@ -77,10 +76,7 @@ public void testSimple() { } logger.info("start all the primary shards for test1, replicas will start initializing"); - RoutingNodes routingNodes = clusterState.getRoutingNodes(); - routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable(); - clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); - routingNodes = clusterState.getRoutingNodes(); + routingTable = startInitializingShardsAndReroute(strategy, clusterState, "test1").routingTable(); for (int i = 0; i < routingTable.index("test1").shards().size(); i++) { assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java index 87339868e4c2c..b13d03cf9f4cc 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java @@ -27,14 +27,12 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.elasticsearch.common.settings.Settings; -import static 
org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; import static org.elasticsearch.cluster.routing.allocation.RoutingNodesUtils.numberOfShardsOfType; @@ -71,8 +69,7 @@ public void testIndexLevelShardsLimitAllocate() { assertThat(clusterState.getRoutingNodes().node("node2").numberOfShardsWithState(ShardRoutingState.INITIALIZING), equalTo(2)); logger.info("Start the primary shards"); - RoutingNodes routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(ShardRoutingState.STARTED), equalTo(2)); assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(ShardRoutingState.INITIALIZING), equalTo(0)); @@ -81,8 +78,7 @@ public void testIndexLevelShardsLimitAllocate() { assertThat(clusterState.getRoutingNodes().unassigned().size(), equalTo(4)); logger.info("Do another reroute, make sure its still not allocated"); - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + startInitializingShardsAndReroute(strategy, clusterState); } public void testClusterLevelShardsLimitAllocate() { @@ -114,8 +110,7 @@ public void testClusterLevelShardsLimitAllocate() { assertThat(clusterState.getRoutingNodes().node("node2").numberOfShardsWithState(ShardRoutingState.INITIALIZING), equalTo(1)); logger.info("Start the primary shards"); - RoutingNodes routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(ShardRoutingState.STARTED), equalTo(1)); assertThat(clusterState.getRoutingNodes().node("node2").numberOfShardsWithState(ShardRoutingState.STARTED), equalTo(1)); @@ -133,8 +128,7 @@ public void testClusterLevelShardsLimitAllocate() { assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(ShardRoutingState.INITIALIZING), equalTo(1)); assertThat(clusterState.getRoutingNodes().node("node2").numberOfShardsWithState(ShardRoutingState.INITIALIZING), equalTo(1)); - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(ShardRoutingState.STARTED), equalTo(2)); assertThat(clusterState.getRoutingNodes().node("node2").numberOfShardsWithState(ShardRoutingState.STARTED), equalTo(2)); @@ -171,8 +165,7 @@ public void testIndexLevelShardsLimitRemain() { clusterState = strategy.reroute(clusterState, "reroute"); logger.info("Start the primary shards"); - RoutingNodes routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(numberOfShardsOfType(clusterState.getRoutingNodes(), STARTED), equalTo(5)); @@ -194,8 +187,7 
@@ public void testIndexLevelShardsLimitRemain() { .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); clusterState = strategy.reroute(clusterState, "reroute"); - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(numberOfShardsOfType(clusterState.getRoutingNodes(), STARTED), equalTo(10)); @@ -228,8 +220,7 @@ public void testIndexLevelShardsLimitRemain() { assertThat(clusterState.getRoutingNodes().node("node2").numberOfShardsWithState(STARTED), equalTo(3)); // the first move will destroy the balance and the balancer will move 2 shards from node2 to node one right after // moving the nodes to node2 since we consider INITIALIZING nodes during rebalance - routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); // now we are done compared to EvenShardCountAllocator since the Balancer is not solely based on the average assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(STARTED), equalTo(5)); assertThat(clusterState.getRoutingNodes().node("node2").numberOfShardsWithState(STARTED), equalTo(5)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java index 8aab17160fd65..8c21aa0334189 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java @@ -91,7 +91,7 @@ public void testSingleIndexStartedShard() { logger.info("Marking the shard as started"); RoutingNodes routingNodes = clusterState.getRoutingNodes(); - newState = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING)); + newState = startInitializingShardsAndReroute(strategy, clusterState, routingNodes.node("node1")); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; @@ -150,7 +150,7 @@ public void testSingleIndexStartedShard() { logger.info("Start the shard on node 1"); routingNodes = clusterState.getRoutingNodes(); - newState = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING)); + newState = startInitializingShardsAndReroute(strategy, clusterState, routingNodes.node("node1")); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; @@ -290,7 +290,7 @@ public void testMultiIndexEvenDistribution() { assertThat(newState, equalTo(clusterState)); logger.info("Marking the shard as started"); - newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + newState = startShardsAndReroute(strategy, clusterState, routingNodes.shardsWithState(INITIALIZING)); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; @@ -373,8 +373,7 @@ public void testMultiIndexUnevenNodes() { assertThat(newState, equalTo(clusterState)); clusterState = newState; - routingNodes = clusterState.getRoutingNodes(); - newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + newState =
startInitializingShardsAndReroute(strategy, clusterState); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; @@ -390,7 +389,7 @@ public void testMultiIndexUnevenNodes() { assertThat("4 target shard routing are initializing", numberOfShardsOfType(routingNodes, INITIALIZING), equalTo(4)); logger.info("Now, mark the relocated as started"); - newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + newState = startInitializingShardsAndReroute(strategy, clusterState); // routingTable = strategy.reroute(new RoutingStrategyInfo(metaData, routingTable), nodes); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java index a7980b4d0f9e5..17f5f74547504 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java @@ -90,7 +90,7 @@ public void testSingleIndexFirstStartPrimaryThenBackups() { logger.info("Start the primary shard (on node1)"); RoutingNodes routingNodes = clusterState.getRoutingNodes(); - newState = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING)); + newState = startInitializingShardsAndReroute(strategy, clusterState, routingNodes.node("node1")); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; @@ -113,7 +113,7 @@ public void testSingleIndexFirstStartPrimaryThenBackups() { logger.info("Start the backup shard"); routingNodes = clusterState.getRoutingNodes(); - newState = strategy.applyStartedShards(clusterState, routingNodes.node("node2").shardsWithState(INITIALIZING)); + newState = startInitializingShardsAndReroute(strategy, clusterState, routingNodes.node("node2")); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java index d39912f0b1e6a..50b1d06273040 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java @@ -38,7 +38,6 @@ import org.elasticsearch.index.shard.ShardId; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -74,7 +73,7 @@ public void testStartedShardsMatching() { logger.info("--> test starting of shard"); - ClusterState newState = allocation.applyStartedShards(state, Arrays.asList(initShard)); + ClusterState newState = startShardsAndReroute(allocation, state, initShard); assertThat("failed to start " + initShard + "\ncurrent routing table:" + newState.routingTable(), newState, not(equalTo(state))); assertTrue(initShard + "isn't started \ncurrent routing table:" + newState.routingTable(), @@ -82,7 +81,7 @@ public void testStartedShardsMatching() { state = newState; logger.info("--> testing starting of relocating shards"); - newState = allocation.applyStartedShards(state, Arrays.asList(relocatingShard.getTargetRelocatingShard())); + newState = startShardsAndReroute(allocation, state, 
relocatingShard.getTargetRelocatingShard()); assertThat("failed to start " + relocatingShard + "\ncurrent routing table:" + newState.routingTable(), newState, not(equalTo(state))); ShardRouting shardRouting = newState.routingTable().index("test").shard(relocatingShard.id()).getShards().get(0); @@ -131,7 +130,7 @@ public void testRelocatingPrimariesWithInitializingReplicas() { ClusterState state = stateBuilder.build(); logger.info("--> test starting of relocating primary shard with initializing / relocating replica"); - ClusterState newState = allocation.applyStartedShards(state, Arrays.asList(relocatingPrimary.getTargetRelocatingShard())); + ClusterState newState = startShardsAndReroute(allocation, state, relocatingPrimary.getTargetRelocatingShard()); assertNotEquals(newState, state); assertTrue(newState.routingTable().index("test").allPrimaryShardsActive()); ShardRouting startedReplica = newState.routingTable().index("test").shard(0).replicaShards().get(0); @@ -152,7 +151,7 @@ public void testRelocatingPrimariesWithInitializingReplicas() { startedShards.add(relocatingPrimary.getTargetRelocatingShard()); startedShards.add(relocatingReplica ? replica.getTargetRelocatingShard() : replica); Collections.shuffle(startedShards, random()); - newState = allocation.applyStartedShards(state, startedShards); + newState = startShardsAndReroute(allocation, state, startedShards); assertNotEquals(newState, state); assertTrue(newState.routingTable().index("test").shard(0).allShardsStarted()); } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java index 1a5127bcda501..c11cc99026f55 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java @@ -104,7 +104,7 @@ public void testSingleIndexFirstStartPrimaryThenBackups() { logger.info("Start the primary shard (on node1)"); RoutingNodes routingNodes = clusterState.getRoutingNodes(); - newState = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING)); + newState = startInitializingShardsAndReroute(strategy, clusterState, routingNodes.node("node1")); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; @@ -127,7 +127,7 @@ public void testSingleIndexFirstStartPrimaryThenBackups() { logger.info("Start the backup shard"); routingNodes = clusterState.getRoutingNodes(); - newState = strategy.applyStartedShards(clusterState, routingNodes.node("node2").shardsWithState(INITIALIZING)); + newState = startInitializingShardsAndReroute(strategy, clusterState, routingNodes.node("node2")); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; routingNodes = clusterState.getRoutingNodes(); @@ -162,7 +162,7 @@ public void testSingleIndexFirstStartPrimaryThenBackups() { logger.info("Start the shards on node 3"); routingNodes = clusterState.getRoutingNodes(); - newState = strategy.applyStartedShards(clusterState, routingNodes.node("node3").shardsWithState(INITIALIZING)); + newState = startInitializingShardsAndReroute(strategy, clusterState, routingNodes.node("node3")); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; routingNodes = clusterState.getRoutingNodes(); diff --git 
a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java index 3c88de4b639ca..6a4b68c16f6ba 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java @@ -91,28 +91,28 @@ public void testPrimaryRecoveryThrottling() { assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(17)); logger.info("start initializing, another 3 should initialize"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(3)); assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(3)); assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(14)); logger.info("start initializing, another 3 should initialize"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(6)); assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(3)); assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(11)); logger.info("start initializing, another 1 should initialize"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(9)); assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(1)); assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(10)); logger.info("start initializing, all primaries should be started"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(10)); assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(0)); @@ -144,14 +144,14 @@ public void testReplicaAndPrimaryRecoveryThrottling() { assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(7)); logger.info("start initializing, another 2 should initialize"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(3)); assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(2)); assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(5)); logger.info("start initializing, all primaries should be started"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, 
clusterState); assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(5)); assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(0)); @@ -167,14 +167,14 @@ public void testReplicaAndPrimaryRecoveryThrottling() { assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(2)); logger.info("start initializing replicas"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(8)); assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(2)); assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(0)); logger.info("start initializing replicas, all should be started"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(10)); assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(0)); @@ -205,13 +205,13 @@ public void testThrottleIncomingAndOutgoing() { assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries("node1"), 5); logger.info("start initializing, all primaries should be started"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(5)); assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(4)); assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(0)); - clusterState = strategy.applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logger.info("start another 2 nodes, 5 shards should be relocating - at most 5 are allowed per node"); clusterState = ClusterState.builder(clusterState) @@ -227,7 +227,7 @@ public void testThrottleIncomingAndOutgoing() { assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries("node1"), 0); assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node1"), 5); - clusterState = strategy.applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logger.info("start the relocating shards, one more shard should relocate away from node1"); assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(8)); @@ -262,7 +262,7 @@ public void testOutgoingThrottlesAllocation() { assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(2)); logger.info("start initializing"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(1)); assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(0)); @@ -279,7 +279,7 @@ public void testOutgoingThrottlesAllocation() { 
assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node1"), 1); logger.info("start initializing non-primary"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(2)); assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(0)); assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(1)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java index 0d063d7623410..851e2260313f9 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java @@ -27,7 +27,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.common.settings.Settings; @@ -75,12 +74,10 @@ public void testUpdateNumberOfReplicas() { clusterState = strategy.reroute(clusterState, "reroute"); logger.info("Start all the primary shards"); - RoutingNodes routingNodes = clusterState.getRoutingNodes(); - clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logger.info("Start all the replica shards"); - routingNodes = clusterState.getRoutingNodes(); - ClusterState newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + ClusterState newState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; @@ -99,7 +96,6 @@ public void testUpdateNumberOfReplicas() { logger.info("add another replica"); - routingNodes = clusterState.getRoutingNodes(); final String[] indices = {"test"}; RoutingTable updatedRoutingTable = RoutingTable.builder(clusterState.routingTable()).updateNumberOfReplicas(2, indices).build(); @@ -137,8 +133,7 @@ public void testUpdateNumberOfReplicas() { assertThat(clusterState.routingTable().index("test").shard(0).replicaShardsWithState(INITIALIZING).get(0).currentNodeId(), equalTo("node3")); - routingNodes = clusterState.getRoutingNodes(); - newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); + newState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; @@ -154,7 +149,6 @@ public void testUpdateNumberOfReplicas() { anyOf(equalTo(nodeHoldingReplica), equalTo("node3"))); logger.info("now remove a replica"); - routingNodes = clusterState.getRoutingNodes(); updatedRoutingTable = RoutingTable.builder(clusterState.routingTable()).updateNumberOfReplicas(1, indices).build(); metaData = MetaData.builder(clusterState.metaData()).updateNumberOfReplicas(1, indices).build(); clusterState = ClusterState.builder(clusterState).routingTable(updatedRoutingTable).metaData(metaData).build(); diff --git 
a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java index 651ed26e4cf5e..4c48ce7b36068 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java @@ -130,7 +130,7 @@ public void testDiskThreshold() { assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); logger.info("--> start the shards (primaries)"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logShardStates(clusterState); // Assert that we're able to start the primary @@ -139,7 +139,7 @@ public void testDiskThreshold() { assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0)); logger.info("--> start the shards (replicas)"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logShardStates(clusterState); // Assert that the replica couldn't be started since node1 doesn't have enough space @@ -158,7 +158,7 @@ public void testDiskThreshold() { assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); logger.info("--> start the shards (replicas)"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logShardStates(clusterState); // Assert that the replica couldn't be started since node1 doesn't have enough space @@ -237,7 +237,7 @@ public void testDiskThreshold() { assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); logger.info("--> apply INITIALIZING shards"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logShardStates(clusterState); assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0)); @@ -336,7 +336,7 @@ public void testDiskThresholdWithAbsoluteSizes() { assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(2)); logger.info("--> start the shards (primaries)"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logShardStates(clusterState); // Assert that we're able to start the primary and replica, since they were both initializing @@ -362,7 +362,7 @@ public void testDiskThresholdWithAbsoluteSizes() { assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); logger.info("--> start the shards (replicas)"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logShardStates(clusterState); // Assert that all replicas could be started @@ -445,7 +445,7 @@ public void 
testDiskThresholdWithAbsoluteSizes() { assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); logger.info("--> apply INITIALIZING shards"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logShardStates(clusterState); // primary shard already has been relocated away @@ -471,7 +471,7 @@ public void testDiskThresholdWithAbsoluteSizes() { assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); logger.info("--> apply INITIALIZING shards"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logger.info("--> final cluster state:"); logShardStates(clusterState); @@ -533,8 +533,7 @@ Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLU routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); logger.info("--> start the shards (primaries)"); - routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)) - .routingTable(); + routingTable = startInitializingShardsAndReroute(strategy, clusterState).routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); logShardStates(clusterState); @@ -599,8 +598,7 @@ Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLU assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); logger.info("--> start the shards (primaries)"); - routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)) - .routingTable(); + routingTable = startInitializingShardsAndReroute(strategy, clusterState).routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); logShardStates(clusterState); @@ -695,7 +693,7 @@ Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLU assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(4)); logger.info("--> start the shards"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logShardStates(clusterState); // Assert that we're able to start the primary and replicas diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java index 47c4cb60331c4..8f790b41dad04 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java @@ -381,7 +381,7 @@ public void testSizeShrinkIndex() { .build(); clusterState = allocationService.reroute(clusterState, "foo"); - clusterState = allocationService.applyStartedShards(clusterState, + clusterState = startShardsAndReroute(allocationService, clusterState, 
clusterState.getRoutingTable().index("test").shardsWithState(ShardRoutingState.UNASSIGNED)); RoutingAllocation allocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, info, 0); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationShortCircuitTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationShortCircuitTests.java index 9fcd3d97f1fbe..05f32f868a195 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationShortCircuitTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationShortCircuitTests.java @@ -75,9 +75,7 @@ private static ClusterState createClusterStateWithAllShardsAssigned() { while (clusterState.getRoutingNodes().hasUnassignedShards() || clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).isEmpty() == false) { - clusterState = allocationService.applyStartedShards(clusterState, - clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING)); - clusterState = allocationService.reroute(clusterState, "reroute"); + clusterState = startInitializingShardsAndReroute(allocationService, clusterState); } return clusterState; diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java index b5f68c3956f97..c342443ad707b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java @@ -109,8 +109,7 @@ public void testClusterEnableOnlyPrimaries() { assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); logger.info("--> start the shards (primaries)"); - routingTable = strategy.applyStartedShards(clusterState, - clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable(); + routingTable = startInitializingShardsAndReroute(strategy, clusterState).routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(0)); @@ -143,9 +142,9 @@ public void testIndexEnableNone() { clusterState = strategy.reroute(clusterState, "reroute"); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); logger.info("--> start the shards (primaries)"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logger.info("--> start the shards (replicas)"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); logger.info("--> verify only enabled index has been routed"); assertThat(clusterState.getRoutingNodes().shardsWithState("enabled", STARTED).size(), equalTo(2)); @@ -192,11 +191,11 @@ public void testEnableClusterBalance() { clusterState = strategy.reroute(clusterState, "reroute"); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(4)); logger.info("--> start the shards (primaries)"); - clusterState = 
strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(4)); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(4)); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(8)); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(0)); @@ -250,7 +249,7 @@ public void testEnableClusterBalance() { default: fail("only replicas, primaries or all are allowed"); } - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(8)); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(0)); @@ -288,7 +287,7 @@ public void testEnableClusterBalanceNoReplicas() { clusterState = strategy.reroute(clusterState, "reroute"); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(6)); logger.info("--> start the shards (primaries)"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(6)); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(0)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java index 1bafb6b13755a..17d963caf85fd 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java @@ -97,14 +97,14 @@ public void testFilterInitialRecovery() { assertEquals(routingTable.index("idx").shard(0).primaryShard().state(), INITIALIZING); assertEquals(routingTable.index("idx").shard(0).primaryShard().currentNodeId(), "node2"); - state = service.applyStartedShards(state, routingTable.index("idx").shard(0).shardsWithState(INITIALIZING)); + state = startShardsAndReroute(service, state, routingTable.index("idx").shard(0).shardsWithState(INITIALIZING)); routingTable = state.routingTable(); // ok now we are started and can be allocated anywhere!! lets see... 
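A note on the allocation-test hunks running through these files: every deleted strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)) call is replaced by a shared startInitializingShardsAndReroute / startShardsAndReroute helper. The helper definitions are not part of this diff; the sketch below reconstructs them from the statements they replace (most explicitly the EnableAllocationShortCircuitTests hunk above, which deleted an applyStartedShards call followed by a reroute), so the bodies and the hypothetical host class are inferred rather than quoted:

import java.util.List;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.AllocationService;

// Hypothetical host class for the sketch; in the test suite the helpers would
// live in the shared allocation-test base class.
public class AllocationTestHelpersSketch {

    // Start every INITIALIZING shard and follow up with a reroute, mirroring the
    // two-statement pattern this patch deletes at each call site.
    public static ClusterState startInitializingShardsAndReroute(AllocationService allocationService, ClusterState clusterState) {
        return startShardsAndReroute(allocationService, clusterState,
                clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING));
    }

    // Variant used where a test starts an explicit subset of shards, as in the
    // DiskThresholdDeciderUnitTests and FilterAllocationDeciderTests hunks.
    public static ClusterState startShardsAndReroute(AllocationService allocationService, ClusterState clusterState,
                                                     List<ShardRouting> initializingShards) {
        return allocationService.reroute(allocationService.applyStartedShards(clusterState, initializingShards), "reroute");
    }
}

Assuming the helper reroutes as sketched, call sites that previously only applied the started shards (such as the DiskThresholdDeciderTests hunks) now also get the follow-up reroute, which reads as the intended cleanup rather than an accident.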
// first create another copy assertEquals(routingTable.index("idx").shard(0).replicaShards().get(0).state(), INITIALIZING); assertEquals(routingTable.index("idx").shard(0).replicaShards().get(0).currentNodeId(), "node1"); - state = service.applyStartedShards(state, routingTable.index("idx").shard(0).replicaShardsWithState(INITIALIZING)); + state = startShardsAndReroute(service, state, routingTable.index("idx").shard(0).replicaShardsWithState(INITIALIZING)); routingTable = state.routingTable(); assertEquals(routingTable.index("idx").shard(0).replicaShards().get(0).state(), STARTED); assertEquals(routingTable.index("idx").shard(0).replicaShards().get(0).currentNodeId(), "node1"); diff --git a/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java index 0736d29cf6602..c556f2a3a5e9a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. */ + package org.elasticsearch.cluster.service; import org.apache.logging.log4j.Level; @@ -105,7 +106,7 @@ private TimedClusterApplierService createTimedClusterService(boolean makeMaster) return timedClusterApplierService; } - @TestLogging("org.elasticsearch.cluster.service:TRACE") // To ensure that we log cluster state events on TRACE level + @TestLogging(value = "org.elasticsearch.cluster.service:TRACE", reason = "to ensure that we log cluster state events on TRACE level") public void testClusterStateUpdateLogging() throws Exception { MockLogAppender mockAppender = new MockLogAppender(); mockAppender.start(); @@ -176,7 +177,7 @@ public void onFailure(String source, Exception e) { } } - @TestLogging("org.elasticsearch.cluster.service:WARN") // To ensure that we log cluster state events on WARN level + @TestLogging(value = "org.elasticsearch.cluster.service:WARN", reason = "to ensure that we log cluster state events on WARN level") public void testLongClusterStateUpdateLogging() throws Exception { MockLogAppender mockAppender = new MockLogAppender(); mockAppender.start(); @@ -507,4 +508,5 @@ protected long currentTimeInMillis() { return super.currentTimeInMillis(); } } + } diff --git a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java index a61545810db1f..2bf043f6a199e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java @@ -16,11 +16,12 @@ * specific language governing permissions and limitations * under the License. 
*/ + package org.elasticsearch.cluster.service; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -32,6 +33,8 @@ import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.LocalClusterUpdateTask; import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.coordination.ClusterStatePublisher; +import org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Nullable; @@ -42,8 +45,6 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.BaseFuture; import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.cluster.coordination.ClusterStatePublisher; -import org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.junit.annotations.TestLogging; @@ -305,7 +306,7 @@ public void onFailure(String source, Exception e) { assertTrue(published.get()); } - @TestLogging("org.elasticsearch.cluster.service:TRACE") // To ensure that we log cluster state events on TRACE level + @TestLogging(value = "org.elasticsearch.cluster.service:TRACE", reason = "to ensure that we log cluster state events on TRACE level") public void testClusterStateUpdateLogging() throws Exception { MockLogAppender mockAppender = new MockLogAppender(); mockAppender.start(); @@ -642,7 +643,7 @@ public void onFailure(String source, Exception e) { containsString("Reason: [Blocking operation]")); } - @TestLogging("org.elasticsearch.cluster.service:WARN") // To ensure that we log cluster state events on WARN level + @TestLogging(value = "org.elasticsearch.cluster.service:WARN", reason = "to ensure that we log cluster state events on WARN level") public void testLongClusterStateUpdateLogging() throws Exception { MockLogAppender mockAppender = new MockLogAppender(); mockAppender.start(); @@ -923,4 +924,5 @@ protected long currentTimeInNanos() { public static ClusterState discoveryState(MasterService masterService) { return masterService.state(); } + } diff --git a/server/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java b/server/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java index 59c11b2bc9442..993a8ce408b4a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java @@ -47,7 +47,6 @@ import java.util.Map; import static java.util.Collections.singletonMap; -import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -255,9 +254,8 @@ public void testAttributePreferenceRouting() { ).build(); clusterState = strategy.reroute(clusterState, "reroute"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); - - clusterState = strategy.applyStartedShards(clusterState, 
clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); // after all are started, check routing iteration ShardIterator shardIterator = clusterState.routingTable().index("test").shard(0) @@ -304,7 +302,7 @@ public void testNodeSelectorRouting(){ clusterState = strategy.reroute(clusterState, "reroute"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); ShardsIterator shardsIterator = clusterState.routingTable().index("test") .shard(0).onlyNodeSelectorActiveInitializingShardsIt("disk:ebs",clusterState.nodes()); @@ -382,9 +380,8 @@ public void testShardsAndPreferNodeRouting() { ).build(); clusterState = strategy.reroute(clusterState, "reroute"); - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); - - clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); OperationRouting operationRouting = new OperationRouting(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); diff --git a/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobStoreTests.java b/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobStoreTests.java index 4a1b1e1016fb9..099d96291adea 100644 --- a/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobStoreTests.java +++ b/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobStoreTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.repositories.ESBlobStoreTestCase; +import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil; import java.io.IOException; import java.nio.file.Files; @@ -74,7 +75,7 @@ public void testReadOnly() throws Exception { byte[] data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16))); writeBlob(container, "test", new BytesArray(data)); assertArrayEquals(readBlobFully(container, "test", data.length), data); - assertTrue(container.blobExists("test")); + assertTrue(BlobStoreTestUtil.blobExists(container, "test")); } } } diff --git a/server/src/test/java/org/elasticsearch/common/geo/BaseGeoParsingTestCase.java b/server/src/test/java/org/elasticsearch/common/geo/BaseGeoParsingTestCase.java index 9548d14cca9a1..9e5d7d7c6ce09 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/BaseGeoParsingTestCase.java +++ b/server/src/test/java/org/elasticsearch/common/geo/BaseGeoParsingTestCase.java @@ -31,6 +31,7 @@ import org.locationtech.spatial4j.shape.jts.JtsGeometry; import java.io.IOException; +import java.text.ParseException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -41,14 +42,14 @@ abstract class BaseGeoParsingTestCase extends ESTestCase { protected static final GeometryFactory GEOMETRY_FACTORY = SPATIAL_CONTEXT.getGeometryFactory(); - public abstract void testParsePoint() throws IOException; - public abstract void testParseMultiPoint() throws IOException; - public abstract void testParseLineString() throws IOException; 
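The throws-clause churn in this BaseGeoParsingTestCase hunk (IOException widening to IOException, ParseException on each abstract test method) follows from the rewritten non-JTS branch of assertGeometryEquals just below: instead of ShapeParser.parse(parser).buildGeometry(), parsing now goes through GeometryParser and GeometryIndexer.prepareForIndexing, and the WKT-capable GeometryParser surfaces the checked java.text.ParseException. A minimal sketch of the new path as these tests use it, assuming (from the GeometryParser(rightOrientation, true, true) call later in this patch) that the three constructor flags are right-orientation, coerce, and ignore-Z-values:

import java.io.IOException;
import java.text.ParseException;
import org.elasticsearch.common.geo.GeometryIndexer;
import org.elasticsearch.common.geo.GeometryParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.geo.geometry.Geometry;

// A hypothetical helper inside an ESTestCase subclass (createParser comes from the base class):
private Geometry parseAndPrepare(XContentBuilder geoJson) throws IOException, ParseException {
    try (XContentParser parser = createParser(geoJson)) {
        parser.nextToken();
        GeometryParser geometryParser = new GeometryParser(true, true, true);
        Geometry shape = geometryParser.parse(parser);               // WKT input can throw the checked ParseException
        return new GeometryIndexer(true).prepareForIndexing(shape);  // split/normalize before comparing in assertions
    }
}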
- public abstract void testParseMultiLineString() throws IOException; - public abstract void testParsePolygon() throws IOException; - public abstract void testParseMultiPolygon() throws IOException; - public abstract void testParseEnvelope() throws IOException; - public abstract void testParseGeometryCollection() throws IOException; + public abstract void testParsePoint() throws IOException, ParseException; + public abstract void testParseMultiPoint() throws IOException, ParseException; + public abstract void testParseLineString() throws IOException, ParseException; + public abstract void testParseMultiLineString() throws IOException, ParseException; + public abstract void testParsePolygon() throws IOException, ParseException; + public abstract void testParseMultiPolygon() throws IOException, ParseException; + public abstract void testParseEnvelope() throws IOException, ParseException; + public abstract void testParseGeometryCollection() throws IOException, ParseException; protected void assertValidException(XContentBuilder builder, Class expectedException) throws IOException { try (XContentParser parser = createParser(builder)) { @@ -57,13 +58,16 @@ protected void assertValidException(XContentBuilder builder, Class expectedEx } } - protected void assertGeometryEquals(Object expected, XContentBuilder geoJson, boolean useJTS) throws IOException { + protected void assertGeometryEquals(Object expected, XContentBuilder geoJson, boolean useJTS) throws IOException, ParseException { try (XContentParser parser = createParser(geoJson)) { parser.nextToken(); if (useJTS) { ElasticsearchGeoAssertions.assertEquals(expected, ShapeParser.parse(parser).buildS4J()); } else { - ElasticsearchGeoAssertions.assertEquals(expected, ShapeParser.parse(parser).buildGeometry()); + GeometryParser geometryParser = new GeometryParser(true, true, true); + org.elasticsearch.geo.geometry.Geometry shape = geometryParser.parse(parser); + shape = new GeometryIndexer(true).prepareForIndexing(shape); + ElasticsearchGeoAssertions.assertEquals(expected, shape); } } } diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoJsonParserTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoJsonParserTests.java index 4146adb2d299a..ef45194146de3 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoJsonParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoJsonParserTests.java @@ -39,6 +39,7 @@ import org.elasticsearch.geo.utils.GeographyValidator; import java.io.IOException; +import java.text.ParseException; import java.util.Arrays; import java.util.Collections; @@ -149,7 +150,7 @@ public void testParseMultiDimensionShapes() throws IOException { @Override public void testParseEnvelope() throws IOException { // test #1: envelope with expected coordinate order (TopLeft, BottomRight) - XContentBuilder multilinesGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "envelope") + XContentBuilder multilinesGeoJson = XContentFactory.jsonBuilder().startObject().field("type", randomBoolean() ? 
"envelope" : "bbox") .startArray("coordinates") .startArray().value(-50).value(30).endArray() .startArray().value(50).value(-30).endArray() @@ -159,7 +160,7 @@ public void testParseEnvelope() throws IOException { assertGeometryEquals(expected, multilinesGeoJson); // test #2: envelope that spans dateline - multilinesGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "envelope") + multilinesGeoJson = XContentFactory.jsonBuilder().startObject().field("type", randomBoolean() ? "envelope" : "bbox") .startArray("coordinates") .startArray().value(50).value(30).endArray() .startArray().value(-50).value(-30).endArray() @@ -170,7 +171,7 @@ public void testParseEnvelope() throws IOException { assertGeometryEquals(expected, multilinesGeoJson); // test #3: "envelope" (actually a triangle) with invalid number of coordinates (TopRight, BottomLeft, BottomRight) - multilinesGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "envelope") + multilinesGeoJson = XContentFactory.jsonBuilder().startObject().field("type", randomBoolean() ? "envelope" : "bbox") .startArray("coordinates") .startArray().value(50).value(30).endArray() .startArray().value(-50).value(-30).endArray() @@ -184,7 +185,7 @@ public void testParseEnvelope() throws IOException { } // test #4: "envelope" with empty coordinates - multilinesGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "envelope") + multilinesGeoJson = XContentFactory.jsonBuilder().startObject().field("type", randomBoolean() ? "envelope" : "bbox") .startArray("coordinates") .endArray() .endObject(); @@ -618,7 +619,7 @@ public void testParseGeometryCollection() throws IOException { assertGeometryEquals(geometryExpected, geometryCollectionGeoJson); } - public void testThatParserExtractsCorrectTypeAndCoordinatesFromArbitraryJson() throws IOException { + public void testThatParserExtractsCorrectTypeAndCoordinatesFromArbitraryJson() throws IOException, ParseException { XContentBuilder pointGeoJson = XContentFactory.jsonBuilder() .startObject() .startObject("crs") diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoJsonSerializationTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoJsonSerializationTests.java index 46766b4e11f4b..b113c8046e098 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoJsonSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoJsonSerializationTests.java @@ -23,28 +23,25 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.geo.geometry.Circle; +import org.elasticsearch.geo.GeometryTestUtils; import org.elasticsearch.geo.geometry.Geometry; -import org.elasticsearch.geo.geometry.GeometryCollection; -import org.elasticsearch.geo.geometry.Line; -import org.elasticsearch.geo.geometry.LinearRing; -import org.elasticsearch.geo.geometry.MultiLine; -import org.elasticsearch.geo.geometry.MultiPoint; -import org.elasticsearch.geo.geometry.MultiPolygon; -import org.elasticsearch.geo.geometry.Point; -import org.elasticsearch.geo.geometry.Polygon; -import org.elasticsearch.geo.geometry.Rectangle; import org.elasticsearch.geo.utils.GeographyValidator; import org.elasticsearch.test.AbstractXContentTestCase; import org.elasticsearch.test.ESTestCase; import java.io.IOException; -import java.util.ArrayList; -import java.util.List; import java.util.Objects; -import java.util.function.Function; import 
java.util.function.Supplier; +import static org.elasticsearch.geo.GeometryTestUtils.randomCircle; +import static org.elasticsearch.geo.GeometryTestUtils.randomGeometryCollection; +import static org.elasticsearch.geo.GeometryTestUtils.randomLine; +import static org.elasticsearch.geo.GeometryTestUtils.randomMultiLine; +import static org.elasticsearch.geo.GeometryTestUtils.randomMultiPoint; +import static org.elasticsearch.geo.GeometryTestUtils.randomMultiPolygon; +import static org.elasticsearch.geo.GeometryTestUtils.randomPoint; +import static org.elasticsearch.geo.GeometryTestUtils.randomPolygon; + public class GeoJsonSerializationTests extends ESTestCase { private static class GeometryWrapper implements ToXContentObject { @@ -119,7 +116,7 @@ public void testMultiPolygon() throws IOException { } public void testEnvelope() throws IOException { - xContentTest(GeoJsonSerializationTests::randomRectangle); + xContentTest(GeometryTestUtils::randomRectangle); } public void testGeometryCollection() throws IOException { @@ -129,143 +126,4 @@ public void testGeometryCollection() throws IOException { public void testCircle() throws IOException { xContentTest(() -> randomCircle(randomBoolean())); } - - public static double randomLat() { - return randomDoubleBetween(-90, 90, true); - } - - public static double randomLon() { - return randomDoubleBetween(-180, 180, true); - } - - public static Circle randomCircle(boolean hasAlt) { - if (hasAlt) { - return new Circle(randomDoubleBetween(-90, 90, true), randomDoubleBetween(-180, 180, true), randomDouble(), - randomDoubleBetween(0, 100, false)); - } else { - return new Circle(randomDoubleBetween(-90, 90, true), randomDoubleBetween(-180, 180, true), randomDoubleBetween(0, 100, false)); - } - } - - public static Line randomLine(boolean hasAlts) { - int size = randomIntBetween(2, 10); - double[] lats = new double[size]; - double[] lons = new double[size]; - double[] alts = hasAlts ? 
new double[size] : null; - for (int i = 0; i < size; i++) { - lats[i] = randomLat(); - lons[i] = randomLon(); - if (hasAlts) { - alts[i] = randomDouble(); - } - } - if (hasAlts) { - return new Line(lats, lons, alts); - } - return new Line(lats, lons); - } - - public static Point randomPoint(boolean hasAlt) { - if (hasAlt) { - return new Point(randomLat(), randomLon(), randomDouble()); - } else { - return new Point(randomLat(), randomLon()); - } - } - - public static MultiPoint randomMultiPoint(boolean hasAlt) { - int size = randomIntBetween(3, 10); - List points = new ArrayList<>(); - for (int i = 0; i < size; i++) { - points.add(randomPoint(hasAlt)); - } - return new MultiPoint(points); - } - - public static MultiLine randomMultiLine(boolean hasAlt) { - int size = randomIntBetween(3, 10); - List lines = new ArrayList<>(); - for (int i = 0; i < size; i++) { - lines.add(randomLine(hasAlt)); - } - return new MultiLine(lines); - } - - public static MultiPolygon randomMultiPolygon(boolean hasAlt) { - int size = randomIntBetween(3, 10); - List polygons = new ArrayList<>(); - for (int i = 0; i < size; i++) { - polygons.add(randomPolygon(hasAlt)); - } - return new MultiPolygon(polygons); - } - - public static LinearRing randomLinearRing(boolean hasAlt) { - int size = randomIntBetween(3, 10); - double[] lats = new double[size + 1]; - double[] lons = new double[size + 1]; - double[] alts; - if (hasAlt) { - alts = new double[size + 1]; - } else { - alts = null; - } - for (int i = 0; i < size; i++) { - lats[i] = randomLat(); - lons[i] = randomLon(); - if (hasAlt) { - alts[i] = randomDouble(); - } - } - lats[size] = lats[0]; - lons[size] = lons[0]; - if (hasAlt) { - alts[size] = alts[0]; - return new LinearRing(lats, lons, alts); - } else { - return new LinearRing(lats, lons); - } - } - - public static Polygon randomPolygon(boolean hasAlt) { - int size = randomIntBetween(0, 10); - List holes = new ArrayList<>(); - for (int i = 0; i < size; i++) { - holes.add(randomLinearRing(hasAlt)); - } - if (holes.size() > 0) { - return new Polygon(randomLinearRing(hasAlt), holes); - } else { - return new Polygon(randomLinearRing(hasAlt)); - } - } - - public static Rectangle randomRectangle() { - double lat1 = randomLat(); - double lat2 = randomLat(); - double minLon = randomLon(); - double maxLon = randomLon(); - return new Rectangle(Math.min(lat1, lat2), Math.max(lat1, lat2), minLon, maxLon); - } - - public static GeometryCollection randomGeometryCollection(boolean hasAlt) { - return randomGeometryCollection(0, hasAlt); - } - - private static GeometryCollection randomGeometryCollection(int level, boolean hasAlt) { - int size = randomIntBetween(1, 10); - List shapes = new ArrayList<>(); - for (int i = 0; i < size; i++) { - @SuppressWarnings("unchecked") Function geometry = randomFrom( - GeoJsonSerializationTests::randomCircle, - GeoJsonSerializationTests::randomLine, - GeoJsonSerializationTests::randomPoint, - GeoJsonSerializationTests::randomPolygon, - hasAlt ? GeoJsonSerializationTests::randomPoint : (b) -> randomRectangle(), - level < 3 ? 
(b) -> randomGeometryCollection(level + 1, b) : GeoJsonSerializationTests::randomPoint // don't build too deep - ); - shapes.add(geometry.apply(hasAlt)); - } - return new GeometryCollection<>(shapes); - } } diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoJsonShapeParserTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoJsonShapeParserTests.java index 5806875ce109b..ccfc599f4cadc 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoJsonShapeParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoJsonShapeParserTests.java @@ -54,6 +54,7 @@ import org.locationtech.spatial4j.shape.jts.JtsPoint; import java.io.IOException; +import java.text.ParseException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -68,7 +69,7 @@ public class GeoJsonShapeParserTests extends BaseGeoParsingTestCase { @Override - public void testParsePoint() throws IOException { + public void testParsePoint() throws IOException, ParseException { XContentBuilder pointGeoJson = XContentFactory.jsonBuilder() .startObject() .field("type", "Point") @@ -80,7 +81,7 @@ public void testParsePoint() throws IOException { } @Override - public void testParseLineString() throws IOException { + public void testParseLineString() throws IOException, ParseException { XContentBuilder lineGeoJson = XContentFactory.jsonBuilder() .startObject() .field("type", "LineString") @@ -102,12 +103,12 @@ public void testParseLineString() throws IOException { try (XContentParser parser = createParser(lineGeoJson)) { parser.nextToken(); - ElasticsearchGeoAssertions.assertLineString(ShapeParser.parse(parser).buildGeometry(), false); + ElasticsearchGeoAssertions.assertLineString(parse(parser), false); } } @Override - public void testParseMultiLineString() throws IOException { + public void testParseMultiLineString() throws IOException, ParseException { XContentBuilder multilinesGeoJson = XContentFactory.jsonBuilder() .startObject() .field("type", "MultiLineString") @@ -140,7 +141,7 @@ public void testParseMultiLineString() throws IOException { multilinesGeoJson, false); } - public void testParseCircle() throws IOException { + public void testParseCircle() throws IOException, ParseException { XContentBuilder multilinesGeoJson = XContentFactory.jsonBuilder() .startObject() .field("type", "circle") @@ -182,7 +183,7 @@ public void testParseMultiDimensionShapes() throws IOException { } @Override - public void testParseEnvelope() throws IOException { + public void testParseEnvelope() throws IOException, ParseException { // test #1: envelope with expected coordinate order (TopLeft, BottomRight) XContentBuilder multilinesGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "envelope") .startArray("coordinates") @@ -235,7 +236,7 @@ public void testParseEnvelope() throws IOException { } @Override - public void testParsePolygon() throws IOException { + public void testParsePolygon() throws IOException, ParseException { XContentBuilder polygonGeoJson = XContentFactory.jsonBuilder() .startObject() .field("type", "Polygon") @@ -268,7 +269,7 @@ public void testParsePolygon() throws IOException { assertGeometryEquals(p, polygonGeoJson, false); } - public void testParse3DPolygon() throws IOException { + public void testParse3DPolygon() throws IOException, ParseException { XContentBuilder polygonGeoJson = XContentFactory.jsonBuilder() .startObject() .field("type", "Polygon") @@ -485,7 +486,7 @@ public void testParseInvalidDimensionalMultiPolygon() throws 
IOException { } - public void testParseOGCPolygonWithoutHoles() throws IOException { + public void testParseOGCPolygonWithoutHoles() throws IOException, ParseException { // test 1: ccw poly not crossing dateline String polygonGeoJson = Strings.toString(XContentFactory.jsonBuilder().startObject().field("type", "Polygon") .startArray("coordinates") @@ -508,7 +509,7 @@ public void testParseOGCPolygonWithoutHoles() throws IOException { try (XContentParser parser = createParser(JsonXContent.jsonXContent, polygonGeoJson)) { parser.nextToken(); - ElasticsearchGeoAssertions.assertPolygon(ShapeParser.parse(parser).buildGeometry(), false); + ElasticsearchGeoAssertions.assertPolygon(parse(parser), false); } // test 2: ccw poly crossing dateline @@ -533,7 +534,7 @@ public void testParseOGCPolygonWithoutHoles() throws IOException { try (XContentParser parser = createParser(JsonXContent.jsonXContent, polygonGeoJson)) { parser.nextToken(); - ElasticsearchGeoAssertions.assertMultiPolygon(ShapeParser.parse(parser).buildGeometry(), false); + ElasticsearchGeoAssertions.assertMultiPolygon(parse(parser), false); } // test 3: cw poly not crossing dateline @@ -558,7 +559,7 @@ public void testParseOGCPolygonWithoutHoles() throws IOException { try (XContentParser parser = createParser(JsonXContent.jsonXContent, polygonGeoJson)) { parser.nextToken(); - ElasticsearchGeoAssertions.assertPolygon(ShapeParser.parse(parser).buildGeometry(), false); + ElasticsearchGeoAssertions.assertPolygon(parse(parser), false); } // test 4: cw poly crossing dateline @@ -583,11 +584,11 @@ public void testParseOGCPolygonWithoutHoles() throws IOException { try (XContentParser parser = createParser(JsonXContent.jsonXContent, polygonGeoJson)) { parser.nextToken(); - ElasticsearchGeoAssertions.assertMultiPolygon(ShapeParser.parse(parser).buildGeometry(), false); + ElasticsearchGeoAssertions.assertMultiPolygon(parse(parser), false); } } - public void testParseOGCPolygonWithHoles() throws IOException { + public void testParseOGCPolygonWithHoles() throws IOException, ParseException { // test 1: ccw poly not crossing dateline String polygonGeoJson = Strings.toString(XContentFactory.jsonBuilder().startObject().field("type", "Polygon") .startArray("coordinates") @@ -616,7 +617,7 @@ public void testParseOGCPolygonWithHoles() throws IOException { try (XContentParser parser = createParser(JsonXContent.jsonXContent, polygonGeoJson)) { parser.nextToken(); - ElasticsearchGeoAssertions.assertPolygon(ShapeParser.parse(parser).buildGeometry(), false); + ElasticsearchGeoAssertions.assertPolygon(parse(parser), false); } // test 2: ccw poly crossing dateline @@ -647,7 +648,7 @@ public void testParseOGCPolygonWithHoles() throws IOException { try (XContentParser parser = createParser(JsonXContent.jsonXContent, polygonGeoJson)) { parser.nextToken(); - ElasticsearchGeoAssertions.assertMultiPolygon(ShapeParser.parse(parser).buildGeometry(), false); + ElasticsearchGeoAssertions.assertMultiPolygon(parse(parser), false); } // test 3: cw poly not crossing dateline @@ -678,7 +679,7 @@ public void testParseOGCPolygonWithHoles() throws IOException { try (XContentParser parser = createParser(JsonXContent.jsonXContent, polygonGeoJson)) { parser.nextToken(); - ElasticsearchGeoAssertions.assertPolygon(ShapeParser.parse(parser).buildGeometry(), false); + ElasticsearchGeoAssertions.assertPolygon(parse(parser), false); } // test 4: cw poly crossing dateline @@ -709,7 +710,7 @@ public void testParseOGCPolygonWithHoles() throws IOException { try (XContentParser parser = 
createParser(JsonXContent.jsonXContent, polygonGeoJson)) { parser.nextToken(); - ElasticsearchGeoAssertions.assertMultiPolygon(ShapeParser.parse(parser).buildGeometry(), false); + ElasticsearchGeoAssertions.assertMultiPolygon(parse(parser), false); } } @@ -816,7 +817,7 @@ public void testParseInvalidPolygon() throws IOException { } } - public void testParsePolygonWithHole() throws IOException { + public void testParsePolygonWithHole() throws IOException, ParseException { XContentBuilder polygonGeoJson = XContentFactory.jsonBuilder() .startObject() .field("type", "Polygon") @@ -894,7 +895,7 @@ public void testParseSelfCrossingPolygon() throws IOException { } @Override - public void testParseMultiPoint() throws IOException { + public void testParseMultiPoint() throws IOException, ParseException { XContentBuilder multiPointGeoJson = XContentFactory.jsonBuilder() .startObject() .field("type", "MultiPoint") @@ -914,7 +915,7 @@ public void testParseMultiPoint() throws IOException { } @Override - public void testParseMultiPolygon() throws IOException { + public void testParseMultiPolygon() throws IOException, ParseException { // test #1: two polygons; one without hole, one with hole XContentBuilder multiPolygonGeoJson = XContentFactory.jsonBuilder() .startObject() @@ -1043,14 +1044,14 @@ public void testParseMultiPolygon() throws IOException { new org.elasticsearch.geo.geometry.LinearRing( new double[] {0.8d, 0.2d, 0.2d, 0.8d, 0.8d}, new double[] {100.8d, 100.8d, 100.2d, 100.2d, 100.8d}); - org.elasticsearch.geo.geometry.MultiPolygon lucenePolygons = new org.elasticsearch.geo.geometry.MultiPolygon( - Collections.singletonList(new org.elasticsearch.geo.geometry.Polygon(new org.elasticsearch.geo.geometry.LinearRing( - new double[] {0d, 0d, 1d, 1d, 0d}, new double[] {100d, 101d, 101d, 100d, 100d}), Collections.singletonList(luceneHole)))); + org.elasticsearch.geo.geometry.Polygon lucenePolygons = (new org.elasticsearch.geo.geometry.Polygon( + new org.elasticsearch.geo.geometry.LinearRing( + new double[] {0d, 0d, 1d, 1d, 0d}, new double[] {100d, 101d, 101d, 100d, 100d}), Collections.singletonList(luceneHole))); assertGeometryEquals(lucenePolygons, multiPolygonGeoJson, false); } @Override - public void testParseGeometryCollection() throws IOException { + public void testParseGeometryCollection() throws IOException, ParseException { XContentBuilder geometryCollectionGeoJson = XContentFactory.jsonBuilder() .startObject() .field("type", "GeometryCollection") @@ -1138,7 +1139,7 @@ public void testParseGeometryCollection() throws IOException { assertGeometryEquals(geometryExpected, geometryCollectionGeoJson, false); } - public void testThatParserExtractsCorrectTypeAndCoordinatesFromArbitraryJson() throws IOException { + public void testThatParserExtractsCorrectTypeAndCoordinatesFromArbitraryJson() throws IOException, ParseException { XContentBuilder pointGeoJson = XContentFactory.jsonBuilder() .startObject() .startObject("crs") @@ -1161,7 +1162,7 @@ public void testThatParserExtractsCorrectTypeAndCoordinatesFromArbitraryJson() t assertGeometryEquals(expectedPt, pointGeoJson, false); } - public void testParseOrientationOption() throws IOException { + public void testParseOrientationOption() throws IOException, ParseException { // test 1: valid ccw (right handed system) poly not crossing dateline (with 'right' field) XContentBuilder polygonGeoJson = XContentFactory.jsonBuilder() .startObject() @@ -1193,7 +1194,7 @@ public void testParseOrientationOption() throws IOException { try (XContentParser parser = 
createParser(polygonGeoJson)) { parser.nextToken(); - ElasticsearchGeoAssertions.assertPolygon(ShapeParser.parse(parser).buildGeometry(), false); + ElasticsearchGeoAssertions.assertPolygon(parse(parser), false); } // test 2: valid ccw (right handed system) poly not crossing dateline (with 'ccw' field) @@ -1227,7 +1228,7 @@ public void testParseOrientationOption() throws IOException { try (XContentParser parser = createParser(polygonGeoJson)) { parser.nextToken(); - ElasticsearchGeoAssertions.assertPolygon(ShapeParser.parse(parser).buildGeometry(), false); + ElasticsearchGeoAssertions.assertPolygon(parse(parser), false); } // test 3: valid ccw (right handed system) poly not crossing dateline (with 'counterclockwise' field) @@ -1261,7 +1262,7 @@ public void testParseOrientationOption() throws IOException { try (XContentParser parser = createParser(polygonGeoJson)) { parser.nextToken(); - ElasticsearchGeoAssertions.assertPolygon(ShapeParser.parse(parser).buildGeometry(), false); + ElasticsearchGeoAssertions.assertPolygon(parse(parser), false); } // test 4: valid cw (left handed system) poly crossing dateline (with 'left' field) @@ -1295,7 +1296,7 @@ public void testParseOrientationOption() throws IOException { try (XContentParser parser = createParser(polygonGeoJson)) { parser.nextToken(); - ElasticsearchGeoAssertions.assertMultiPolygon(ShapeParser.parse(parser).buildGeometry(), false); + ElasticsearchGeoAssertions.assertMultiPolygon(parse(parser), false); } // test 5: valid cw multipoly (left handed system) poly crossing dateline (with 'cw' field) @@ -1329,7 +1330,7 @@ public void testParseOrientationOption() throws IOException { try (XContentParser parser = createParser(polygonGeoJson)) { parser.nextToken(); - ElasticsearchGeoAssertions.assertMultiPolygon(ShapeParser.parse(parser).buildGeometry(), false); + ElasticsearchGeoAssertions.assertMultiPolygon(parse(parser), false); } // test 6: valid cw multipoly (left handed system) poly crossing dateline (with 'clockwise' field) @@ -1363,7 +1364,7 @@ public void testParseOrientationOption() throws IOException { try (XContentParser parser = createParser(polygonGeoJson)) { parser.nextToken(); - ElasticsearchGeoAssertions.assertMultiPolygon(ShapeParser.parse(parser).buildGeometry(), false); + ElasticsearchGeoAssertions.assertMultiPolygon(parse(parser), false); } } @@ -1421,4 +1422,10 @@ public void testParseInvalidGeometryCollectionShapes() throws IOException { assertNull(parser.nextToken()); // no more elements afterwards } } + + public Geometry parse(XContentParser parser) throws IOException, ParseException { + GeometryParser geometryParser = new GeometryParser(true, true, true); + GeometryIndexer indexer = new GeometryIndexer(true); + return indexer.prepareForIndexing(geometryParser.parse(parser)); + } } diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java index 5835ab6a06c14..44016145750ed 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java @@ -41,6 +41,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.geo.geometry.Geometry; +import org.elasticsearch.geo.geometry.GeometryCollection; import org.elasticsearch.geo.geometry.Line; import org.elasticsearch.geo.geometry.MultiLine; import 
org.elasticsearch.geo.geometry.MultiPoint; @@ -62,6 +63,7 @@ import org.locationtech.spatial4j.shape.jts.JtsPoint; import java.io.IOException; +import java.text.ParseException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -91,7 +93,7 @@ private static XContentBuilder toWKTContent(ShapeBuilder builder, boole return XContentFactory.jsonBuilder().value(wkt); } - private void assertExpected(Object expected, ShapeBuilder builder, boolean useJTS) throws IOException { + private void assertExpected(Object expected, ShapeBuilder builder, boolean useJTS) throws IOException, ParseException { XContentBuilder xContentBuilder = toWKTContent(builder, false); assertGeometryEquals(expected, xContentBuilder, useJTS); } @@ -102,7 +104,7 @@ private void assertMalformed(ShapeBuilder builder) throws IOException { } @Override - public void testParsePoint() throws IOException { + public void testParsePoint() throws IOException, ParseException { GeoPoint p = RandomShapeGenerator.randomPoint(random()); Coordinate c = new Coordinate(p.lon(), p.lat()); Point expected = GEOMETRY_FACTORY.createPoint(c); @@ -112,7 +114,7 @@ public void testParsePoint() throws IOException { } @Override - public void testParseMultiPoint() throws IOException { + public void testParseMultiPoint() throws IOException, ParseException { int numPoints = randomIntBetween(0, 100); List coordinates = new ArrayList<>(numPoints); for (int i = 0; i < numPoints; ++i) { @@ -130,6 +132,9 @@ public void testParseMultiPoint() throws IOException { if (numPoints == 0) { expectedGeom = MultiPoint.EMPTY; actual = new MultiPointBuilder(); + } else if (numPoints == 1) { + expectedGeom = points.get(0); + actual = new MultiPointBuilder(coordinates); } else { expectedGeom = new MultiPoint(points); actual = new MultiPointBuilder(coordinates); @@ -160,7 +165,7 @@ private List randomLineStringCoords() { } @Override - public void testParseLineString() throws IOException { + public void testParseLineString() throws IOException, ParseException { List coordinates = randomLineStringCoords(); LineString expected = GEOMETRY_FACTORY.createLineString(coordinates.toArray(new Coordinate[coordinates.size()])); assertExpected(jtsGeom(expected), new LineStringBuilder(coordinates), true); @@ -175,7 +180,7 @@ public void testParseLineString() throws IOException { } @Override - public void testParseMultiLineString() throws IOException { + public void testParseMultiLineString() throws IOException, ParseException { int numLineStrings = randomIntBetween(0, 8); List lineStrings = new ArrayList<>(numLineStrings); MultiLineStringBuilder builder = new MultiLineStringBuilder(); @@ -194,7 +199,7 @@ public void testParseMultiLineString() throws IOException { } Geometry expectedGeom; if (lines.isEmpty()) { - expectedGeom = MultiLine.EMPTY; + expectedGeom = GeometryCollection.EMPTY; } else if (lines.size() == 1) { expectedGeom = new Line(lines.get(0).getLats(), lines.get(0).getLons()); } else { @@ -210,7 +215,7 @@ public void testParseMultiLineString() throws IOException { } @Override - public void testParsePolygon() throws IOException { + public void testParsePolygon() throws IOException, ParseException { PolygonBuilder builder = PolygonBuilder.class.cast( RandomShapeGenerator.createShape(random(), RandomShapeGenerator.ShapeType.POLYGON)); Coordinate[] coords = builder.coordinates()[0][0]; @@ -222,7 +227,7 @@ public void testParsePolygon() throws IOException { } @Override - public void testParseMultiPolygon() throws IOException { + public void 
testParseMultiPolygon() throws IOException, ParseException { int numPolys = randomIntBetween(0, 8); MultiPolygonBuilder builder = new MultiPolygonBuilder(); PolygonBuilder pb; @@ -242,7 +247,7 @@ public void testParseMultiPolygon() throws IOException { assertMalformed(builder); } - public void testParsePolygonWithHole() throws IOException { + public void testParsePolygonWithHole() throws IOException, ParseException { // add 3d point to test ISSUE #10501 List shellCoordinates = new ArrayList<>(); shellCoordinates.add(new Coordinate(100, 0)); @@ -279,7 +284,7 @@ public void testParsePolygonWithHole() throws IOException { assertMalformed(polygonWithHole); } - public void testParseMixedDimensionPolyWithHole() throws IOException { + public void testParseMixedDimensionPolyWithHole() throws IOException, ParseException { List shellCoordinates = new ArrayList<>(); shellCoordinates.add(new Coordinate(100, 0)); shellCoordinates.add(new Coordinate(101, 0)); @@ -436,7 +441,7 @@ public void testMalformedWKT() throws IOException { } @Override - public void testParseEnvelope() throws IOException { + public void testParseEnvelope() throws IOException, ParseException { org.apache.lucene.geo.Rectangle r = GeoTestUtil.nextBox(); EnvelopeBuilder builder = new EnvelopeBuilder(new Coordinate(r.minLon, r.maxLat), new Coordinate(r.maxLon, r.minLat)); @@ -452,7 +457,7 @@ public void testInvalidGeometryType() throws IOException { } @Override - public void testParseGeometryCollection() throws IOException { + public void testParseGeometryCollection() throws IOException, ParseException { if (rarely()) { // assert empty shape collection GeometryCollectionBuilder builder = new GeometryCollectionBuilder(); diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeometryIndexerTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeometryIndexerTests.java new file mode 100644 index 0000000000000..5ab5aaff33e05 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/geo/GeometryIndexerTests.java @@ -0,0 +1,239 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.geo; + +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.geo.geometry.Circle; +import org.elasticsearch.geo.geometry.Geometry; +import org.elasticsearch.geo.geometry.GeometryCollection; +import org.elasticsearch.geo.geometry.Line; +import org.elasticsearch.geo.geometry.LinearRing; +import org.elasticsearch.geo.geometry.MultiLine; +import org.elasticsearch.geo.geometry.MultiPoint; +import org.elasticsearch.geo.geometry.MultiPolygon; +import org.elasticsearch.geo.geometry.Point; +import org.elasticsearch.geo.geometry.Polygon; +import org.elasticsearch.geo.utils.WellKnownText; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.text.ParseException; +import java.util.Arrays; +import java.util.Collections; + +public class GeometryIndexerTests extends ESTestCase { + + GeometryIndexer indexer = new GeometryIndexer(true); + private static final WellKnownText WKT = new WellKnownText(true, geometry -> { + }); + + + public void testCircle() { + UnsupportedOperationException ex = + expectThrows(UnsupportedOperationException.class, () -> indexer.prepareForIndexing(new Circle(1, 2, 3))); + assertEquals("CIRCLE geometry is not supported", ex.getMessage()); + } + + public void testCollection() { + assertEquals(GeometryCollection.EMPTY, indexer.prepareForIndexing(GeometryCollection.EMPTY)); + + GeometryCollection collection = new GeometryCollection<>(Collections.singletonList( + new Point(1, 2) + )); + + Geometry indexed = new Point(1, 2); + assertEquals(indexed, indexer.prepareForIndexing(collection)); + + collection = new GeometryCollection<>(Arrays.asList( + new Point(1, 2), new Point(3, 4), new Line(new double[]{10, 20}, new double[]{160, 200}) + )); + + indexed = new GeometryCollection<>(Arrays.asList( + new Point(1, 2), new Point(3, 4), + new MultiLine(Arrays.asList( + new Line(new double[]{10, 15}, new double[]{160, 180}), + new Line(new double[]{15, 20}, new double[]{180, -160})) + )) + ); + assertEquals(indexed, indexer.prepareForIndexing(collection)); + + } + + public void testLine() { + Line line = new Line(new double[]{1, 2}, new double[]{3, 4}); + Geometry indexed = line; + assertEquals(indexed, indexer.prepareForIndexing(line)); + + line = new Line(new double[]{10, 20}, new double[]{160, 200}); + indexed = new MultiLine(Arrays.asList( + new Line(new double[]{10, 15}, new double[]{160, 180}), + new Line(new double[]{15, 20}, new double[]{180, -160})) + ); + + assertEquals(indexed, indexer.prepareForIndexing(line)); + } + + public void testMultiLine() { + Line line = new Line(new double[]{1, 2}, new double[]{3, 4}); + MultiLine multiLine = new MultiLine(Collections.singletonList(line)); + Geometry indexed = line; + assertEquals(indexed, indexer.prepareForIndexing(multiLine)); + + multiLine = new MultiLine(Arrays.asList( + line, new Line(new double[]{10, 20}, new double[]{160, 200}) + )); + + indexed = new MultiLine(Arrays.asList( + line, + new Line(new double[]{10, 15}, new double[]{160, 180}), + new Line(new double[]{15, 20}, new double[]{180, -160})) + ); + + assertEquals(indexed, indexer.prepareForIndexing(multiLine)); + } + + public void testPoint() { + Point point = new Point(1, 2); + Geometry indexed = point; + assertEquals(indexed, indexer.prepareForIndexing(point)); + + point = new Point(1, 2, 3); + assertEquals(indexed, indexer.prepareForIndexing(point)); + } + 
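The line, multi-line, and collection expectations in the new GeometryIndexerTests above all hinge on one behavior: a segment that crosses the dateline is cut at longitude 180, and the latitude of the cut falls out of linear interpolation. The arithmetic below is a standalone sketch for the recurring test segment (lat 10, lon 160) to (lat 20, lon 200), implied by the expected values rather than copied from the indexer:

// Runnable sketch of the dateline split implied by the test expectations.
public class DatelineSplitSketch {
    public static void main(String[] args) {
        double lat1 = 10, lon1 = 160;                // segment start
        double lat2 = 20, lon2 = 200;                // 200 is the unwrapped form of -160
        double t = (180 - lon1) / (lon2 - lon1);     // 0.5: fraction of the segment where it reaches lon 180
        double latAtCut = lat1 + t * (lat2 - lat1);  // 15.0
        // Matches the expected decomposition: Line({10, 15}, {160, 180}) and Line({15, 20}, {180, -160}).
        System.out.println(latAtCut);
    }
}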
+ public void testMultiPoint() { + MultiPoint multiPoint = MultiPoint.EMPTY; + Geometry indexed = multiPoint; + assertEquals(indexed, indexer.prepareForIndexing(multiPoint)); + + multiPoint = new MultiPoint(Collections.singletonList(new Point(1, 2))); + indexed = new Point(1, 2); + assertEquals(indexed, indexer.prepareForIndexing(multiPoint)); + + multiPoint = new MultiPoint(Arrays.asList(new Point(1, 2), new Point(3, 4))); + indexed = multiPoint; + assertEquals(indexed, indexer.prepareForIndexing(multiPoint)); + + multiPoint = new MultiPoint(Arrays.asList(new Point(1, 2, 10), new Point(3, 4, 10))); + assertEquals(indexed, indexer.prepareForIndexing(multiPoint)); + } + + public void testPolygon() { + Polygon polygon = new Polygon(new LinearRing(new double[]{10, 10, 20, 20, 10}, new double[]{160, 200, 200, 160, 160})); + Geometry indexed = new MultiPolygon(Arrays.asList( + new Polygon(new LinearRing(new double[]{10, 20, 20, 10, 10}, new double[]{180, 180, 160, 160, 180})), + new Polygon(new LinearRing(new double[]{20, 10, 10, 20, 20}, new double[]{-180, -180, -160, -160, -180})) + )); + + assertEquals(indexed, indexer.prepareForIndexing(polygon)); + + polygon = new Polygon(new LinearRing(new double[]{10, 10, 20, 20, 10}, new double[]{160, 200, 200, 160, 160}), + Collections.singletonList( + new LinearRing(new double[]{12, 18, 18, 12, 12}, new double[]{165, 165, 195, 195, 165}))); + + indexed = new MultiPolygon(Arrays.asList( + new Polygon(new LinearRing( + new double[]{10, 12, 12, 18, 18, 20, 20, 10, 10}, + new double[]{180, 180, 165, 165, 180, 180, 160, 160, 180})), + new Polygon(new LinearRing( + new double[]{12, 10, 10, 20, 20, 18, 18, 12, 12}, + new double[]{-180, -180, -160, -160, -180, -180, -165, -165, -180})) + )); + + assertEquals(indexed, indexer.prepareForIndexing(polygon)); + } + + public void testPolygonOrientation() throws IOException, ParseException { + assertEquals(expected("POLYGON ((160 10, -160 10, -160 0, 160 0, 160 10))"), // current algorithm shifts edges to left + actual("POLYGON ((160 0, 160 10, -160 10, -160 0, 160 0))", randomBoolean())); // In WKT the orientation is ignored + + assertEquals(expected("POLYGON ((20 10, -20 10, -20 0, 20 0, 20 10))"), + actual("POLYGON ((20 0, 20 10, -20 10, -20 0, 20 0))", randomBoolean())); + + assertEquals(expected("POLYGON ((160 10, -160 10, -160 0, 160 0, 160 10))"), + actual(polygon(null, 160, 0, 160, 10, -160, 10, -160, 0, 160, 0), true)); + + assertEquals(expected("MULTIPOLYGON (((180 0, 180 10, 160 10, 160 0, 180 0)), ((-180 10, -180 0, -160 0, -160 10, -180 10)))"), + actual(polygon(randomBoolean() ? null : false, 160, 0, 160, 10, -160, 10, -160, 0, 160, 0), false)); + + assertEquals(expected("MULTIPOLYGON (((180 0, 180 10, 160 10, 160 0, 180 0)), ((-180 10, -180 0, -160 0, -160 10, -180 10)))"), + actual(polygon(false, 160, 0, 160, 10, -160, 10, -160, 0, 160, 0), true)); + + assertEquals(expected("POLYGON ((20 10, -20 10, -20 0, 20 0, 20 10))"), + actual(polygon(randomBoolean() ? null : randomBoolean(), 20, 0, 20, 10, -20, 10, -20, 0, 20, 0), randomBoolean())); + } + + private XContentBuilder polygon(Boolean orientation, double... val) throws IOException { + XContentBuilder pointGeoJson = XContentFactory.jsonBuilder().startObject(); + { + pointGeoJson.field("type", "polygon"); + if (orientation != null) { + pointGeoJson.field("orientation", orientation ?
"right" : "left"); + } + pointGeoJson.startArray("coordinates").startArray(); + { + assertEquals(0, val.length % 2); + for (int i = 0; i < val.length; i += 2) { + pointGeoJson.startArray().value(val[i]).value(val[i + 1]).endArray(); + } + } + pointGeoJson.endArray().endArray(); + } + pointGeoJson.endObject(); + return pointGeoJson; + } + + private Geometry expected(String wkt) throws IOException, ParseException { + return parseGeometry(wkt, true); + } + + private Geometry actual(String wkt, boolean rightOrientation) throws IOException, ParseException { + Geometry shape = parseGeometry(wkt, rightOrientation); + return new GeometryIndexer(true).prepareForIndexing(shape); + } + + + private Geometry actual(XContentBuilder geoJson, boolean rightOrientation) throws IOException, ParseException { + Geometry shape = parseGeometry(geoJson, rightOrientation); + return new GeometryIndexer(true).prepareForIndexing(shape); + } + + private Geometry parseGeometry(String wkt, boolean rightOrientation) throws IOException, ParseException { + XContentBuilder json = XContentFactory.jsonBuilder().startObject().field("value", wkt).endObject(); + try (XContentParser parser = createParser(json)) { + parser.nextToken(); + parser.nextToken(); + parser.nextToken(); + GeometryParser geometryParser = new GeometryParser(rightOrientation, true, true); + return geometryParser.parse(parser); + } + } + + private Geometry parseGeometry(XContentBuilder geoJson, boolean rightOrientation) throws IOException, ParseException { + try (XContentParser parser = createParser(geoJson)) { + parser.nextToken(); + GeometryParser geometryParser = new GeometryParser(rightOrientation, true, true); + return geometryParser.parse(parser); + } + } + +} diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeometryParserTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeometryParserTests.java index e3db70fc24e2f..4cef86b1d570e 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeometryParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeometryParserTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.geo.geometry.Line; import org.elasticsearch.geo.geometry.LinearRing; import org.elasticsearch.geo.geometry.Point; import org.elasticsearch.geo.geometry.Polygon; @@ -114,6 +115,20 @@ public void testWKTParsing() throws Exception { newGeoJson.endObject(); assertEquals("{\"val\":\"point (100.0 10.0)\"}", Strings.toString(newGeoJson)); } + + // Make sure we can parse values outside the normal lat lon boundaries + XContentBuilder lineGeoJson = XContentFactory.jsonBuilder() + .startObject() + .field("foo", "LINESTRING (100 0, 200 10)") + .endObject(); + + try (XContentParser parser = createParser(lineGeoJson)) { + parser.nextToken(); // Start object + parser.nextToken(); // Field Name + parser.nextToken(); // Field Value + assertEquals(new Line(new double[]{0, 10}, new double[]{100, 200} ), + new GeometryParser(true, randomBoolean(), randomBoolean()).parse(parser)); + } } public void testNullParsing() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java index b0b11afa97c62..48985ffbee3dd 100644 --- 
a/server/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java @@ -19,10 +19,9 @@ package org.elasticsearch.common.geo.builders; -import org.locationtech.jts.geom.Coordinate; - import org.elasticsearch.test.geo.RandomShapeGenerator; import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType; +import org.locationtech.jts.geom.Coordinate; import java.io.IOException; import java.util.List; diff --git a/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java index 948e29d5d67de..72572c7efc8d5 100644 --- a/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java +++ b/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java @@ -430,20 +430,20 @@ public String getWriteableName() { } } - public void testWriteStreamableList() throws IOException { + public void testWriteWriteableList() throws IOException { final int size = randomIntBetween(0, 5); - final List<TestStreamable> expected = new ArrayList<>(size); + final List<TestWriteable> expected = new ArrayList<>(size); for (int i = 0; i < size; ++i) { - expected.add(new TestStreamable(randomBoolean())); + expected.add(new TestWriteable(randomBoolean())); } final BytesStreamOutput out = new BytesStreamOutput(); - out.writeStreamableList(expected); + out.writeList(expected); final StreamInput in = StreamInput.wrap(BytesReference.toBytes(out.bytes())); - final List<TestStreamable> loaded = in.readStreamableList(TestStreamable::new); + final List<TestWriteable> loaded = in.readList(TestWriteable::new); assertThat(loaded, hasSize(expected.size())); @@ -587,18 +587,15 @@ public void testReadWriteGeoPoint() throws IOException { } } - private static class TestStreamable implements Streamable { + private static class TestWriteable implements Writeable { private boolean value; - TestStreamable() { } - - TestStreamable(boolean value) { + TestWriteable(boolean value) { this.value = value; } - @Override - public void readFrom(StreamInput in) throws IOException { + TestWriteable(StreamInput in) throws IOException { value = in.readBoolean(); } diff --git a/server/src/test/java/org/elasticsearch/common/logging/DeprecationLoggerTests.java b/server/src/test/java/org/elasticsearch/common/logging/DeprecationLoggerTests.java index 740430ac0993b..ad1412dfa44e0 100644 --- a/server/src/test/java/org/elasticsearch/common/logging/DeprecationLoggerTests.java +++ b/server/src/test/java/org/elasticsearch/common/logging/DeprecationLoggerTests.java @@ -26,6 +26,7 @@ import org.apache.logging.log4j.spi.ExtendedLogger; import org.apache.logging.log4j.spi.LoggerContext; import org.apache.logging.log4j.spi.LoggerContextFactory; +import org.elasticsearch.common.SuppressLoggerChecks; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.test.ESTestCase; @@ -56,6 +57,7 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.not; import static org.hamcrest.core.Is.is; +import static org.mockito.Matchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -318,7 +320,7 @@ public void testWarningHeaderSizeSetting() throws IOException{ assertTrue(warningHeadersSize <= 1024); } } - + @SuppressLoggerChecks(reason = "Safe as this is using mockito") public void testLogPermissions() { AtomicBoolean 
supplierCalled = new AtomicBoolean(false); @@ -330,7 +332,7 @@ public void testLogPermissions() { supplierCalled.set(true); createTempDir(); // trigger file permission, like rolling logs would return null; - }).when(mockLogger).warn("foo", new Object[] {"bar"}); + }).when(mockLogger).warn(new DeprecatedMessage("foo", any())); final LoggerContext context = new SimpleLoggerContext() { @Override public ExtendedLogger getLogger(String name) { diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java index c374984eb5d15..42c846d01f5a5 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java @@ -185,12 +185,4 @@ public void testMutuallyExclusiveScopes() { assertThat(e.getMessage(), containsString("Cannot register setting [foo.bar] twice")); } } - - public void testOldMaxClauseCountSetting() { - Settings settings = Settings.builder().put("index.query.bool.max_clause_count", 1024).build(); - IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, - () -> new SettingsModule(settings)); - assertEquals("unknown setting [index.query.bool.max_clause_count] did you mean [indices.query.bool.max_clause_count]?", - ex.getMessage()); - } } diff --git a/server/src/test/java/org/elasticsearch/common/transport/BoundTransportAddressTests.java b/server/src/test/java/org/elasticsearch/common/transport/BoundTransportAddressTests.java index a3a1178473dd2..bb6235cc8165f 100644 --- a/server/src/test/java/org/elasticsearch/common/transport/BoundTransportAddressTests.java +++ b/server/src/test/java/org/elasticsearch/common/transport/BoundTransportAddressTests.java @@ -52,13 +52,7 @@ public void testSerialization() throws Exception { transportAddress.writeTo(streamOutput); StreamInput in = streamOutput.bytes().streamInput(); - BoundTransportAddress serializedAddress; - if (randomBoolean()) { - serializedAddress = BoundTransportAddress.readBoundTransportAddress(in); - } else { - serializedAddress = new BoundTransportAddress(); - serializedAddress.readFrom(in); - } + BoundTransportAddress serializedAddress = new BoundTransportAddress(in); assertThat(serializedAddress, not(sameInstance(transportAddress))); assertThat(serializedAddress.boundAddresses().length, equalTo(transportAddress.boundAddresses().length)); diff --git a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java index c973d89c4d729..749e0d96929c2 100644 --- a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java @@ -52,7 +52,7 @@ import org.elasticsearch.test.disruption.NetworkDisruption.NetworkLinkDisruptionType; import org.elasticsearch.test.disruption.NetworkDisruption.TwoPartitions; import org.elasticsearch.test.disruption.ServiceDisruptionScheme; -import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.test.junit.annotations.TestIssueLogging; import java.util.ArrayList; import java.util.Collections; @@ -105,11 +105,11 @@ static ConflictMode randomMode() { *

* This test is a superset of tests run in the Jepsen test suite, with the exception of versioned updates */ - @TestLogging("_root:DEBUG,org.elasticsearch.action.bulk:TRACE,org.elasticsearch.action.get:TRACE," + + @TestIssueLogging(value = "_root:DEBUG,org.elasticsearch.action.bulk:TRACE,org.elasticsearch.action.get:TRACE," + "org.elasticsearch.discovery:TRACE,org.elasticsearch.action.support.replication:TRACE," + "org.elasticsearch.cluster.service:TRACE,org.elasticsearch.indices.recovery:TRACE," + - "org.elasticsearch.indices.cluster:TRACE,org.elasticsearch.index.shard:TRACE") - // TestLogging for https://github.com/elastic/elasticsearch/issues/41068 + "org.elasticsearch.indices.cluster:TRACE,org.elasticsearch.index.shard:TRACE", + issueUrl = "https://github.com/elastic/elasticsearch/issues/41068") public void testAckedIndexing() throws Exception { final int seconds = !(TEST_NIGHTLY && rarely()) ? 1 : 5; @@ -507,4 +507,5 @@ public void testRestartNodeWhileIndexing() throws Exception { ackedDocs, everyItem(isIn(docs))); } } + } diff --git a/server/src/test/java/org/elasticsearch/discovery/PeerFinderTests.java b/server/src/test/java/org/elasticsearch/discovery/PeerFinderTests.java index 33397ae580282..b4ead893846ba 100644 --- a/server/src/test/java/org/elasticsearch/discovery/PeerFinderTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/PeerFinderTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.discovery.PeerFinder.TransportAddressConnector; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.CapturingTransport; @@ -214,11 +215,9 @@ public void setup() { = new ConnectionManager(settings, capturingTransport); StubbableConnectionManager connectionManager = new StubbableConnectionManager(innerConnectionManager, settings, capturingTransport); - connectionManager.setDefaultNodeConnectedBehavior((cm, discoveryNode) -> { - final boolean isConnected = connectedNodes.contains(discoveryNode); - final boolean isDisconnected = disconnectedNodes.contains(discoveryNode); - assert isConnected != isDisconnected : discoveryNode + ": isConnected=" + isConnected + ", isDisconnected=" + isDisconnected; - return isConnected; + connectionManager.setDefaultNodeConnectedBehavior(cm -> { + assertTrue(Sets.haveEmptyIntersection(connectedNodes, disconnectedNodes)); + return connectedNodes; }); connectionManager.setDefaultGetConnectionBehavior((cm, discoveryNode) -> capturingTransport.createConnection(discoveryNode)); transportService = new TransportService(settings, capturingTransport, deterministicTaskQueue.getThreadPool(), diff --git a/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java index 0e2f34b8e557e..46851aea77b71 100644 --- a/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ + package org.elasticsearch.discovery; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -41,7 +42,6 @@ import org.elasticsearch.test.disruption.NetworkDisruption.NetworkUnresponsive; import org.elasticsearch.test.disruption.NetworkDisruption.TwoPartitions; import org.elasticsearch.test.disruption.SingleNodeDisruption; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import java.util.ArrayList; @@ -173,8 +173,6 @@ private void testFollowerCheckerAfterMasterReelection(NetworkLinkDisruptionType * Test that emulates a frozen elected master node that unfreezes and pushes its cluster state to other nodes that already are * following another elected master node. These nodes should reject this cluster state and prevent themselves from following the stale master. */ - @TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE") - // TestLogging for https://github.com/elastic/elasticsearch/issues/43392 public void testStaleMasterNotHijackingMajority() throws Exception { final List<String> nodes = internalCluster().startNodes(3, Settings.builder() .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s") diff --git a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java index 1d8abcc0e6f11..32a778c46356c 100644 --- a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java @@ -34,6 +34,7 @@ import org.elasticsearch.test.MockHttpTransport; import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.NodeConfigurationSource; +import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.RemoteTransportException; import org.elasticsearch.transport.TransportService; @@ -59,7 +60,7 @@ protected Settings nodeSettings(int nodeOrdinal) { .builder() .put(super.nodeSettings(nodeOrdinal)) .put("discovery.type", "single-node") - .put("transport.port", "0") + .put("transport.port", MockTransportService.getPortRange()) .build(); } diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index bfc45b3118800..63408c2669131 100644 --- a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -220,11 +220,13 @@ public void testJustMasterNode() throws Exception { logger.info("--> create an index"); client().admin().indices().prepareCreate("test").setWaitForActiveShards(ActiveShardCount.NONE).execute().actionGet(); - logger.info("--> closing master node"); - internalCluster().closeNonSharedNodes(false); - - logger.info("--> starting 1 master node non data again"); - internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).build()); + logger.info("--> restarting master node"); + internalCluster().fullRestart(new RestartCallback(){ + @Override + public Settings onNodeStopped(String nodeName) { + return Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).build(); + } + }); logger.info("--> waiting for test index to be created"); ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setIndices("test") @@ -236,7 +238,7 @@ public void testJustMasterNode() throws 
Exception { assertThat(clusterStateResponse.getState().metaData().hasIndex("test"), equalTo(true)); } - public void testJustMasterNodeAndJustDataNode() throws Exception { + public void testJustMasterNodeAndJustDataNode() { logger.info("--> cleaning nodes"); logger.info("--> starting 1 master node non data"); diff --git a/server/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java b/server/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java deleted file mode 100644 index 847c1801510a1..0000000000000 --- a/server/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.gateway; - -import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; -import org.elasticsearch.action.admin.indices.stats.IndexStats; -import org.elasticsearch.action.admin.indices.stats.ShardStats; -import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.indices.recovery.RecoveryState; - -import static org.elasticsearch.test.ESIntegTestCase.client; -import static org.elasticsearch.test.ESTestCase.randomBoolean; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertThat; - -/** - * Test of file reuse on recovery shared between integration tests and backwards - * compatibility tests. - */ -public class ReusePeerRecoverySharedTest { - /** - * Test peer reuse on recovery. This is shared between RecoverFromGatewayIT - * and RecoveryBackwardsCompatibilityIT. - * - * @param indexSettings - * settings for the index to test - * @param restartCluster - * runnable that will restart the cluster under test - * @param logger - * logger for logging - * @param useSyncIds - * should this use synced flush? can't use synced from in the bwc - * tests - */ - public static void testCase(Settings indexSettings, Runnable restartCluster, Logger logger, boolean useSyncIds) { - /* - * prevent any rebalance actions during the peer recovery if we run into - * a relocation the reuse count will be 0 and this fails the test. We - * are testing here if we reuse the files on disk after full restarts - * for replicas. 
- */ - assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put(indexSettings) - .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE))); - client().admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("30s").get(); - logger.info("--> indexing docs"); - for (int i = 0; i < 1000; i++) { - client().prepareIndex("test", "type").setSource("field", "value").execute().actionGet(); - if ((i % 200) == 0) { - client().admin().indices().prepareFlush().execute().actionGet(); - } - } - if (randomBoolean()) { - client().admin().indices().prepareFlush().execute().actionGet(); - } - logger.info("--> running cluster health"); - client().admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("30s").get(); - // just wait for merges - client().admin().indices().prepareForceMerge("test").setMaxNumSegments(100).get(); - client().admin().indices().prepareFlush().setForce(true).get(); - - if (useSyncIds == false) { - logger.info("--> disabling allocation while the cluster is shut down"); - - // Disable allocations while we are closing nodes - client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), - EnableAllocationDecider.Allocation.NONE)).get(); - logger.info("--> full cluster restart"); - restartCluster.run(); - - logger.info("--> waiting for cluster to return to green after first shutdown"); - client().admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("30s").get(); - } else { - logger.info("--> trying to sync flush"); - assertEquals(client().admin().indices().prepareSyncedFlush("test").get().failedShards(), 0); - assertSyncIdsNotNull(); - } - - logger.info("--> disabling allocation while the cluster is shut down{}", useSyncIds ? "" : " a second time"); - // Disable allocations while we are closing nodes - client().admin().cluster().prepareUpdateSettings().setTransientSettings( - Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), - EnableAllocationDecider.Allocation.NONE)) - .get(); - logger.info("--> full cluster restart"); - restartCluster.run(); - - logger.info("--> waiting for cluster to return to green after {}shutdown", useSyncIds ? 
"" : "second "); - client().admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("30s").get(); - - if (useSyncIds) { - assertSyncIdsNotNull(); - } - RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries("test").get(); - for (RecoveryState recoveryState : recoveryResponse.shardRecoveryStates().get("test")) { - long recovered = 0; - for (RecoveryState.File file : recoveryState.getIndex().fileDetails()) { - if (file.name().startsWith("segments")) { - recovered += file.length(); - } - } - if (!recoveryState.getPrimary() && (useSyncIds == false)) { - logger.info("--> replica shard {} recovered from {} to {}, recovered {}, reuse {}", recoveryState.getShardId().getId(), - recoveryState.getSourceNode().getName(), recoveryState.getTargetNode().getName(), - recoveryState.getIndex().recoveredBytes(), recoveryState.getIndex().reusedBytes()); - assertThat("no bytes should be recovered", recoveryState.getIndex().recoveredBytes(), equalTo(recovered)); - assertThat("data should have been reused", recoveryState.getIndex().reusedBytes(), greaterThan(0L)); - // we have to recover the segments file since we commit the translog ID on engine startup - assertThat("all bytes should be reused except of the segments file", recoveryState.getIndex().reusedBytes(), - equalTo(recoveryState.getIndex().totalBytes() - recovered)); - assertThat("no files should be recovered except of the segments file", recoveryState.getIndex().recoveredFileCount(), - equalTo(1)); - assertThat("all files should be reused except of the segments file", recoveryState.getIndex().reusedFileCount(), - equalTo(recoveryState.getIndex().totalFileCount() - 1)); - assertThat("> 0 files should be reused", recoveryState.getIndex().reusedFileCount(), greaterThan(0)); - } else { - if (useSyncIds && !recoveryState.getPrimary()) { - logger.info("--> replica shard {} recovered from {} to {} using sync id, recovered {}, reuse {}", - recoveryState.getShardId().getId(), recoveryState.getSourceNode().getName(), - recoveryState.getTargetNode().getName(), - recoveryState.getIndex().recoveredBytes(), recoveryState.getIndex().reusedBytes()); - } - assertThat(recoveryState.getIndex().recoveredBytes(), equalTo(0L)); - assertThat(recoveryState.getIndex().reusedBytes(), equalTo(recoveryState.getIndex().totalBytes())); - assertThat(recoveryState.getIndex().recoveredFileCount(), equalTo(0)); - assertThat(recoveryState.getIndex().reusedFileCount(), equalTo(recoveryState.getIndex().totalFileCount())); - } - } - } - - public static void assertSyncIdsNotNull() { - IndexStats indexStats = client().admin().indices().prepareStats("test").get().getIndex("test"); - for (ShardStats shardStats : indexStats.getShards()) { - assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); - } - } -} diff --git a/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java b/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java index 72a1cb4a87d7f..49747993ec1fa 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java @@ -29,7 +29,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.index.IndexingSlowLog.SlowLogParsedDocumentPrinter; +import org.elasticsearch.index.IndexingSlowLog.IndexingSlowLogMessage; import 
org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.test.ESTestCase; @@ -38,13 +38,39 @@ import java.io.UncheckedIOException; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.emptyOrNullString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.startsWith; public class IndexingSlowLogTests extends ESTestCase { + + public void testSlowLogMessageHasJsonFields() throws IOException { + BytesReference source = BytesReference.bytes(JsonXContent.contentBuilder() + .startObject().field("foo", "bar").endObject()); + ParsedDocument pd = new ParsedDocument(new NumericDocValuesField("version", 1), + SeqNoFieldMapper.SequenceIDFields.emptySeqID(), "id", + "test", "routingValue", null, source, XContentType.JSON, null); + Index index = new Index("foo", "123"); + // Turning off document logging doesn't log source[] + IndexingSlowLogMessage p = new IndexingSlowLogMessage(index, pd, 10, true, 0); + + assertThat(p.getValueFor("message"),equalTo("[foo/123]")); + assertThat(p.getValueFor("took"),equalTo("10nanos")); + assertThat(p.getValueFor("took_millis"),equalTo("0")); + assertThat(p.getValueFor("doc_type"),equalTo("test")); + assertThat(p.getValueFor("id"),equalTo("id")); + assertThat(p.getValueFor("routing"),equalTo("routingValue")); + assertThat(p.getValueFor("source"), is(emptyOrNullString())); + + // Turning on document logging logs the whole thing + p = new IndexingSlowLogMessage(index, pd, 10, true, Integer.MAX_VALUE); + assertThat(p.getValueFor("source"), containsString("{\\\"foo\\\":\\\"bar\\\"}")); + } + public void testSlowLogParsedDocumentPrinterSourceToLog() throws IOException { BytesReference source = BytesReference.bytes(JsonXContent.contentBuilder() .startObject().field("foo", "bar").endObject()); @@ -53,32 +79,32 @@ public void testSlowLogParsedDocumentPrinterSourceToLog() throws IOException { "test", null, null, source, XContentType.JSON, null); Index index = new Index("foo", "123"); // Turning off document logging doesn't log source[] - SlowLogParsedDocumentPrinter p = new SlowLogParsedDocumentPrinter(index, pd, 10, true, 0); - assertThat(p.toString(), not(containsString("source["))); + IndexingSlowLogMessage p = new IndexingSlowLogMessage(index, pd, 10, true, 0); + assertThat(p.getFormattedMessage(), not(containsString("source["))); // Turning on document logging logs the whole thing - p = new SlowLogParsedDocumentPrinter(index, pd, 10, true, Integer.MAX_VALUE); - assertThat(p.toString(), containsString("source[{\"foo\":\"bar\"}]")); + p = new IndexingSlowLogMessage(index, pd, 10, true, Integer.MAX_VALUE); + assertThat(p.getFormattedMessage(), containsString("source[{\"foo\":\"bar\"}]")); // And you can truncate the source - p = new SlowLogParsedDocumentPrinter(index, pd, 10, true, 3); - assertThat(p.toString(), containsString("source[{\"f]")); + p = new IndexingSlowLogMessage(index, pd, 10, true, 3); + assertThat(p.getFormattedMessage(), containsString("source[{\"f]")); // And you can truncate the source - p = new SlowLogParsedDocumentPrinter(index, pd, 10, true, 3); - assertThat(p.toString(), containsString("source[{\"f]")); - assertThat(p.toString(), startsWith("[foo/123] took")); + p = new IndexingSlowLogMessage(index, pd, 10, true, 3); + 
assertThat(p.getFormattedMessage(), containsString("source[{\"f]")); + assertThat(p.getFormattedMessage(), startsWith("[foo/123] took")); // Throwing a error if source cannot be converted source = new BytesArray("invalid"); - pd = new ParsedDocument(new NumericDocValuesField("version", 1), + ParsedDocument doc = new ParsedDocument(new NumericDocValuesField("version", 1), SeqNoFieldMapper.SequenceIDFields.emptySeqID(), "id", "test", null, null, source, XContentType.JSON, null); - p = new SlowLogParsedDocumentPrinter(index, pd, 10, true, 3); - final UncheckedIOException e = expectThrows(UncheckedIOException.class, p::toString); + final UncheckedIOException e = expectThrows(UncheckedIOException.class, + ()->new IndexingSlowLogMessage(index, doc, 10, true, 3)); assertThat(e, hasToString(containsString("_failed_to_convert_[Unrecognized token 'invalid':" - + " was expecting ('true', 'false' or 'null')\n" + + " was expecting ('true', 'false' or 'null')\\n" + " at [Source: org.elasticsearch.common.bytes.BytesReference$MarkSupportingStreamInputWrapper"))); assertNotNull(e.getCause()); assertThat(e.getCause(), instanceOf(JsonParseException.class)); @@ -128,22 +154,6 @@ public void testReformatSetting() { assertTrue(log.isReformat()); } - public void testReformatIsFalseAndSourceIsTrim() { - String json = "\n\n{ \"fieldName\": 123 } \n "; - BytesReference source = new BytesArray(json); - ParsedDocument pd = new ParsedDocument(new NumericDocValuesField("version", 1), - SeqNoFieldMapper.SequenceIDFields.emptySeqID(), "id", - "test", null, null, source, XContentType.JSON, null); - Index index = new Index("foo", "123"); - // Turning off reformatting so the document is in logs as provided - SlowLogParsedDocumentPrinter p = new SlowLogParsedDocumentPrinter(index, pd, 10, false, 1000); - String logLine = p.toString(); - - //expect the new lines and white characters to be trimmed - assertThat(logLine, containsString("source[{")); - assertThat(logLine.split("\n").length, equalTo(1)); - } - public void testLevelSetting() { SlowLogLevel level = randomFrom(SlowLogLevel.values()); IndexMetaData metaData = newIndexMeta("index", Settings.builder() diff --git a/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java b/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java index 2f682f437a2cb..ce46374cf2716 100644 --- a/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java +++ b/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java @@ -39,12 +39,16 @@ import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.TestSearchContext; import org.elasticsearch.threadpool.ThreadPool; +import org.hamcrest.Matchers; import java.io.IOException; +import java.util.Arrays; import java.util.Collections; +import java.util.List; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.endsWith; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.not; @@ -53,6 +57,9 @@ public class SearchSlowLogTests extends ESSingleNodeTestCase { @Override protected SearchContext createSearchContext(IndexService indexService) { + return createSearchContext(indexService, new String[]{}); + } + protected SearchContext createSearchContext(IndexService indexService, String ... 
groupStats) { BigArrays bigArrays = indexService.getBigArrays(); ThreadPool threadPool = indexService.getThreadPool(); return new TestSearchContext(bigArrays, indexService) { @@ -143,6 +150,12 @@ public String getClusterAlias() { return null; } }; + + @Override + public List<String> groupStats() { + return Arrays.asList(groupStats); + } + @Override public ShardSearchRequest request() { return request; @@ -150,6 +163,45 @@ public ShardSearchRequest request() { }; } + public void testSlowLogHasJsonFields() throws IOException { + IndexService index = createIndex("foo"); + SearchContext searchContext = createSearchContext(index); + SearchSourceBuilder source = SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()); + searchContext.request().source(source); + searchContext.setTask(new SearchTask(0, "n/a", "n/a", "test", null, + Collections.singletonMap(Task.X_OPAQUE_ID, "my_id"))); + SearchSlowLog.SearchSlowLogMessage p = new SearchSlowLog.SearchSlowLogMessage(searchContext, 10); + + assertThat(p.getValueFor("message"), equalTo("[foo][0]")); + assertThat(p.getValueFor("took"), equalTo("10nanos")); + assertThat(p.getValueFor("took_millis"), equalTo("0")); + assertThat(p.getValueFor("total_hits"), equalTo("-1")); + assertThat(p.getValueFor("stats"), equalTo("[]")); + assertThat(p.getValueFor("search_type"), Matchers.nullValue()); + assertThat(p.getValueFor("total_shards"), equalTo("1")); + assertThat(p.getValueFor("source"), equalTo("{\\\"query\\\":{\\\"match_all\\\":{\\\"boost\\\":1.0}}}")); + } + + public void testSlowLogsWithStats() throws IOException { + IndexService index = createIndex("foo"); + SearchContext searchContext = createSearchContext(index, "group1"); + SearchSourceBuilder source = SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()); + searchContext.request().source(source); + searchContext.setTask(new SearchTask(0, "n/a", "n/a", "test", null, + Collections.singletonMap(Task.X_OPAQUE_ID, "my_id"))); + + SearchSlowLog.SearchSlowLogMessage p = new SearchSlowLog.SearchSlowLogMessage(searchContext, 10); + assertThat(p.getValueFor("stats"), equalTo("[\\\"group1\\\"]")); + + searchContext = createSearchContext(index, "group1", "group2"); + source = SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()); + searchContext.request().source(source); + searchContext.setTask(new SearchTask(0, "n/a", "n/a", "test", null, + Collections.singletonMap(Task.X_OPAQUE_ID, "my_id"))); + p = new SearchSlowLog.SearchSlowLogMessage(searchContext, 10); + assertThat(p.getValueFor("stats"), equalTo("[\\\"group1\\\", \\\"group2\\\"]")); + } + public void testSlowLogSearchContextPrinterToLog() throws IOException { IndexService index = createIndex("foo"); SearchContext searchContext = createSearchContext(index); @@ -157,11 +209,11 @@ public void testSlowLogSearchContextPrinterToLog() throws IOException { searchContext.request().source(source); searchContext.setTask(new SearchTask(0, "n/a", "n/a", "test", null, Collections.singletonMap(Task.X_OPAQUE_ID, "my_id"))); - SearchSlowLog.SlowLogSearchContextPrinter p = new SearchSlowLog.SlowLogSearchContextPrinter(searchContext, 10); - assertThat(p.toString(), startsWith("[foo][0]")); + SearchSlowLog.SearchSlowLogMessage p = new SearchSlowLog.SearchSlowLogMessage(searchContext, 10); + assertThat(p.getFormattedMessage(), startsWith("[foo][0]")); // Makes sure that output doesn't contain any new lines - assertThat(p.toString(), not(containsString("\n"))); - assertThat(p.toString(), endsWith("id[my_id], ")); + 
assertThat(p.getFormattedMessage(), not(containsString("\n"))); + assertThat(p.getFormattedMessage(), endsWith("id[my_id], ")); } public void testLevelSetting() { diff --git a/server/src/test/java/org/elasticsearch/index/analysis/NamedAnalyzerTests.java b/server/src/test/java/org/elasticsearch/index/analysis/NamedAnalyzerTests.java index 2caaca53cae67..b1e53bed96e18 100644 --- a/server/src/test/java/org/elasticsearch/index/analysis/NamedAnalyzerTests.java +++ b/server/src/test/java/org/elasticsearch/index/analysis/NamedAnalyzerTests.java @@ -22,6 +22,7 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; import org.elasticsearch.index.mapper.MapperException; +import org.elasticsearch.index.mapper.TextFieldMapper; import org.elasticsearch.test.ESTestCase; public class NamedAnalyzerTests extends ESTestCase { @@ -73,7 +74,13 @@ public AnalysisMode getAnalysisMode() { return mode; } }; - return new CustomAnalyzer(null, new CharFilterFactory[0], - new TokenFilterFactory[] { tokenFilter }); + TokenFilterFactory[] tokenfilters = new TokenFilterFactory[] { tokenFilter }; + CharFilterFactory[] charFilters = new CharFilterFactory[0]; + if (mode == AnalysisMode.SEARCH_TIME && randomBoolean()) { + AnalyzerComponents components = new AnalyzerComponents(null, charFilters, tokenfilters); + // sometimes also return reloadable custom analyzer + return new ReloadableCustomAnalyzer(components , TextFieldMapper.Defaults.POSITION_INCREMENT_GAP, -1); + } + return new CustomAnalyzer(null, charFilters, tokenfilters); } } diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 599133eb43d0a..0df178f924e58 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -2862,19 +2862,15 @@ public void testMissingTranslog() throws IOException { // test that we can force start the engine , even if the translog is missing. engine.close(); // fake a new translog, causing the engine to point to a missing one. 
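// A sketch, not part of the patch: the hunk below bounds the fake translog's primary term by
// the engine's current term. A term above primaryTerm.get() could trip the translog's own term
// invariant first and mask the missing-generation failure this test targets:
//     long newPrimaryTerm = randomLongBetween(0L, primaryTerm.get()); // bounded, unlike randomNonNegativeLong()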
- final long primaryTerm = randomNonNegativeLong(); - Translog translog = createTranslog(() -> primaryTerm); + final long newPrimaryTerm = randomLongBetween(0L, primaryTerm.get()); + final Translog translog = createTranslog(() -> newPrimaryTerm); long id = translog.currentFileGeneration(); translog.close(); IOUtils.rm(translog.location().resolve(Translog.getFilename(id))); - try { - engine = createEngine(store, primaryTranslogDir); - fail("engine shouldn't start without a valid translog id"); - } catch (EngineCreationFailureException ex) { - // expected - } + expectThrows(EngineCreationFailureException.class, "engine shouldn't start without a valid translog id", + () -> createEngine(store, primaryTranslogDir)); // when a new translog is created it should be ok - final String translogUUID = Translog.createEmptyTranslog(primaryTranslogDir, UNASSIGNED_SEQ_NO, shardId, primaryTerm); + final String translogUUID = Translog.createEmptyTranslog(primaryTranslogDir, UNASSIGNED_SEQ_NO, shardId, newPrimaryTerm); store.associateIndexWithNewTranslog(translogUUID); EngineConfig config = config(defaultSettings, store, primaryTranslogDir, newMergePolicy(), null); engine = new InternalEngine(config); @@ -5712,7 +5708,6 @@ public void testMaxSeqNoInCommitUserData() throws Exception { assertMaxSeqNoInCommitUserData(engine); } - @AwaitsFix(bugUrl = "https://issues.apache.org/jira/browse/LUCENE-8809") public void testRefreshAndFailEngineConcurrently() throws Exception { AtomicBoolean stopped = new AtomicBoolean(); Semaphore indexedDocs = new Semaphore(0); @@ -5902,4 +5897,28 @@ private Map<BytesRef, DeleteVersionValue> tombstonesInVersionMap(InternalEngine .filter(e -> e.getValue() instanceof DeleteVersionValue) .collect(Collectors.toMap(e -> e.getKey(), e -> (DeleteVersionValue) e.getValue())); } + + public void testHandleDocumentFailureOnReplica() throws Exception { + AtomicReference<IOException> addDocException = new AtomicReference<>(); + IndexWriterFactory indexWriterFactory = (dir, iwc) -> new IndexWriter(dir, iwc) { + @Override + public long addDocument(Iterable<? extends IndexableField> doc) throws IOException { + final IOException ex = addDocException.getAndSet(null); + if (ex != null) { + throw ex; + } + return super.addDocument(doc); + } + }; + try (Store store = createStore(); + InternalEngine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE, indexWriterFactory)) { + final ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), SOURCE, null); + Engine.Index index = new Engine.Index(newUid(doc), doc, randomNonNegativeLong(), primaryTerm.get(), + randomNonNegativeLong(), null, REPLICA, System.nanoTime(), -1, false, UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); + addDocException.set(new IOException("simulated")); + expectThrows(IOException.class, () -> engine.index(index)); + assertTrue(engine.isClosed.get()); + assertNotNull(engine.failedEngine.get()); + } + } } diff --git a/server/src/test/java/org/elasticsearch/index/engine/SegmentTests.java b/server/src/test/java/org/elasticsearch/index/engine/SegmentTests.java index 47946a6850c48..8faf781fa7dad 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/SegmentTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/SegmentTests.java @@ -95,8 +95,7 @@ public void testSerialization() throws IOException { segment.writeTo(output); output.flush(); StreamInput input = output.bytes().streamInput(); - Segment deserialized = new Segment(); - deserialized.readFrom(input); + Segment deserialized = new Segment(input); 
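// A sketch, not part of the patch, of the Streamable-to-Writeable migration these hunks follow:
// the no-arg constructor plus readFrom(StreamInput) pair is replaced by a single constructor
// taking StreamInput, so a serialization round trip collapses to
//     segment.writeTo(output);                                  // write side
//     Segment copy = new Segment(output.bytes().streamInput()); // read side, one expression
// The assertion that follows verifies the round trip field by field.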
assertTrue(isSegmentEquals(deserialized, segment)); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java b/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java index 7c0e554439078..c165d55e3a5f5 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.geo.geometry.Point; import org.elasticsearch.index.query.QueryShardContext; import java.io.IOException; @@ -181,10 +182,10 @@ public void parse(ParseContext context) throws IOException { pointMapper.parse(context.createExternalValueContext(point)); // Let's add a Dummy Shape - PointBuilder pb = new PointBuilder(-100, 45); if (shapeMapper instanceof GeoShapeFieldMapper) { - shapeMapper.parse(context.createExternalValueContext(pb.buildGeometry())); + shapeMapper.parse(context.createExternalValueContext(new Point(45, -100))); } else { + PointBuilder pb = new PointBuilder(-100, 45); shapeMapper.parse(context.createExternalValueContext(pb.buildS4J())); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java index d80d51320403e..27da7cbe0592a 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java @@ -75,6 +75,23 @@ public void testGeoHashValue() throws Exception { assertThat(doc.rootDoc().getField("point"), notNullValue()); } + public void testWKT() throws Exception { + XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("point").field("type", "geo_point"); + String mapping = Strings.toString(xContentBuilder.endObject().endObject().endObject().endObject()); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); + + ParsedDocument doc = defaultMapper.parse(new SourceToParse("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() + .startObject() + .field("point", "POINT (2 3)") + .endObject()), + XContentType.JSON)); + + assertThat(doc.rootDoc().getField("point"), notNullValue()); + } + public void testLatLonValuesStored() throws Exception { XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("point").field("type", "geo_point"); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java index e7bec7ff95523..493bcc38a55e7 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java @@ -19,7 +19,10 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.CannedTokenStream; import org.apache.lucene.analysis.MockSynonymAnalyzer; +import org.apache.lucene.analysis.Token; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import 
org.apache.lucene.index.DocValuesType; @@ -30,6 +33,8 @@ import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.MultiPhraseQuery; import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; @@ -818,6 +823,28 @@ public void testFastPhraseMapping() throws IOException { new Term("synfield._index_phrase", "motor dog")}) .build())); + // https://github.com/elastic/elasticsearch/issues/43976 + CannedTokenStream cts = new CannedTokenStream( + new Token("foo", 1, 0, 2, 2), + new Token("bar", 0, 0, 2), + new Token("baz", 1, 0, 2) + ); + Analyzer synonymAnalyzer = new Analyzer() { + @Override + protected TokenStreamComponents createComponents(String fieldName) { + return new TokenStreamComponents(reader -> {}, cts); + } + }; + matchQuery.setAnalyzer(synonymAnalyzer); + Query q7 = matchQuery.parse(MatchQuery.Type.BOOLEAN, "synfield", "foo"); + assertThat(q7, is(new BooleanQuery.Builder().add(new BooleanQuery.Builder() + .add(new TermQuery(new Term("synfield", "foo")), BooleanClause.Occur.SHOULD) + .add(new PhraseQuery.Builder() + .add(new Term("synfield", "bar")) + .add(new Term("synfield", "baz")) + .build(), BooleanClause.Occur.SHOULD) + .build(), BooleanClause.Occur.SHOULD).build())); + ParsedDocument doc = mapper.parse(new SourceToParse("test", "type", "1", BytesReference .bytes(XContentFactory.jsonBuilder() .startObject() diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java index 0ebf8ba4d4f64..8fb30cf8d1c38 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java @@ -188,7 +188,7 @@ public void testMultiFieldWithinMultiField() throws IOException { BytesReference.bytes(mapping), true, mapping.contentType()).v2(); Version olderVersion = VersionUtils.randomPreviousCompatibleVersion(random(), Version.V_8_0_0); - Mapper.TypeParser.ParserContext olderContext = new Mapper.TypeParser.ParserContext("type", + Mapper.TypeParser.ParserContext olderContext = new Mapper.TypeParser.ParserContext( null, null, type -> typeParser, olderVersion, null); TypeParsers.parseField(builder, "some-field", fieldNode, olderContext); @@ -203,7 +203,7 @@ public void testMultiFieldWithinMultiField() throws IOException { BytesReference.bytes(mapping), true, mapping.contentType()).v2(); Version version = VersionUtils.randomVersionBetween(random(), Version.V_8_0_0, Version.CURRENT); - Mapper.TypeParser.ParserContext context = new Mapper.TypeParser.ParserContext("type", + Mapper.TypeParser.ParserContext context = new Mapper.TypeParser.ParserContext( null, null, type -> typeParser, version, null); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, diff --git a/server/src/test/java/org/elasticsearch/index/query/IntervalBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/IntervalBuilderTests.java index 69464edb51332..8c14fead82429 100644 --- a/server/src/test/java/org/elasticsearch/index/query/IntervalBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/IntervalBuilderTests.java @@ -23,8 +23,8 @@ import org.apache.lucene.analysis.CannedTokenStream; import org.apache.lucene.analysis.Token; import 
org.apache.lucene.analysis.standard.StandardAnalyzer; -import org.apache.lucene.search.intervals.Intervals; -import org.apache.lucene.search.intervals.IntervalsSource; +import org.apache.lucene.queries.intervals.Intervals; +import org.apache.lucene.queries.intervals.IntervalsSource; import org.elasticsearch.test.ESTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java index c480b52c6dc00..01a114503922c 100644 --- a/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java @@ -22,8 +22,9 @@ import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; -import org.apache.lucene.search.intervals.IntervalQuery; -import org.apache.lucene.search.intervals.Intervals; +import org.apache.lucene.queries.intervals.IntervalQuery; +import org.apache.lucene.queries.intervals.Intervals; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedXContent; @@ -395,7 +396,7 @@ public void testPrefixes() throws IOException { String json = "{ \"intervals\" : { \"" + STRING_FIELD_NAME + "\": { " + "\"prefix\" : { \"prefix\" : \"term\" } } } }"; IntervalQueryBuilder builder = (IntervalQueryBuilder) parseQuery(json); - Query expected = new IntervalQuery(STRING_FIELD_NAME, Intervals.prefix("term")); + Query expected = new IntervalQuery(STRING_FIELD_NAME, Intervals.prefix(new BytesRef("term"))); assertEquals(expected, builder.toQuery(createShardContext())); String no_positions_json = "{ \"intervals\" : { \"" + NO_POSITIONS_FIELD + "\": { " + @@ -422,7 +423,7 @@ public void testPrefixes() throws IOException { "\"prefix\" : { \"prefix\" : \"t\" } } } }"; builder = (IntervalQueryBuilder) parseQuery(short_prefix_json); expected = new IntervalQuery(PREFIXED_FIELD, Intervals.or( - Intervals.fixField(PREFIXED_FIELD + "._index_prefix", Intervals.wildcard("t?")), + Intervals.fixField(PREFIXED_FIELD + "._index_prefix", Intervals.wildcard(new BytesRef("t?"))), Intervals.term("t"))); assertEquals(expected, builder.toQuery(createShardContext())); @@ -454,7 +455,7 @@ public void testWildcard() throws IOException { "\"wildcard\" : { \"pattern\" : \"Te?m\" } } } }"; IntervalQueryBuilder builder = (IntervalQueryBuilder) parseQuery(json); - Query expected = new IntervalQuery(STRING_FIELD_NAME, Intervals.wildcard("te?m")); + Query expected = new IntervalQuery(STRING_FIELD_NAME, Intervals.wildcard(new BytesRef("te?m"))); assertEquals(expected, builder.toQuery(createShardContext())); String no_positions_json = "{ \"intervals\" : { \"" + NO_POSITIONS_FIELD + "\": { " + @@ -468,14 +469,14 @@ public void testWildcard() throws IOException { "\"wildcard\" : { \"pattern\" : \"Te?m\", \"analyzer\" : \"keyword\" } } } }"; builder = (IntervalQueryBuilder) parseQuery(keyword_json); - expected = new IntervalQuery(STRING_FIELD_NAME, Intervals.wildcard("Te?m")); + expected = new IntervalQuery(STRING_FIELD_NAME, Intervals.wildcard(new BytesRef("Te?m"))); assertEquals(expected, builder.toQuery(createShardContext())); String fixed_field_json = "{ \"intervals\" : { \"" + STRING_FIELD_NAME + "\": { " + "\"wildcard\" : { \"pattern\" : \"Te?m\", \"use_field\" : \"masked_field\" } } } }"; 
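// A sketch, not part of the patch: these hunks track two Lucene changes at once. The intervals
// classes moved from org.apache.lucene.search.intervals to org.apache.lucene.queries.intervals,
// and Intervals.prefix/Intervals.wildcard now take a BytesRef rather than a String, so every
// expected query wraps its pattern, e.g.
//     Intervals.fixField(MASKED_FIELD, Intervals.wildcard(new BytesRef("te?m")))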
builder = (IntervalQueryBuilder) parseQuery(fixed_field_json); - expected = new IntervalQuery(STRING_FIELD_NAME, Intervals.fixField(MASKED_FIELD, Intervals.wildcard("te?m"))); + expected = new IntervalQuery(STRING_FIELD_NAME, Intervals.fixField(MASKED_FIELD, Intervals.wildcard(new BytesRef("te?m")))); assertEquals(expected, builder.toQuery(createShardContext())); String fixed_field_json_no_positions = "{ \"intervals\" : { \"" + STRING_FIELD_NAME + "\": { " + @@ -489,7 +490,8 @@ public void testWildcard() throws IOException { "\"wildcard\" : { \"pattern\" : \"Te?m\", \"use_field\" : \"masked_field\", \"analyzer\" : \"keyword\" } } } }"; builder = (IntervalQueryBuilder) parseQuery(fixed_field_analyzer_json); - expected = new IntervalQuery(STRING_FIELD_NAME, Intervals.fixField(MASKED_FIELD, Intervals.wildcard("Te?m"))); + expected = new IntervalQuery(STRING_FIELD_NAME, Intervals.fixField(MASKED_FIELD, + Intervals.wildcard(new BytesRef("Te?m")))); assertEquals(expected, builder.toQuery(createShardContext())); } diff --git a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java index 71aab8ca9f9f6..7822244b9ceaa 100644 --- a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java +++ b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java @@ -48,11 +48,11 @@ public class BulkByScrollResponseTests extends AbstractXContentTestCase<BulkByScrollResponse> + Exception e = expectThrows( + ElasticsearchParseException.class, + () -> point.resetFromString("NOT A POINT(1 2)") + ); + assertEquals("Invalid WKT format", e.getMessage()); + + Exception e2 = expectThrows( + ElasticsearchParseException.class, + () -> point.resetFromString("MULTIPOINT(1 2, 3 4)") + ); + assertEquals("[geo_point] supports only POINT among WKT primitives, but found MULTIPOINT", e2.getMessage()); } public void testEqualsHashCodeContract() { diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 84470b38fda6a..9d1a53369771d 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -1456,7 +1456,7 @@ public void testShardStats() throws IOException { BytesStreamOutput out = new BytesStreamOutput(); stats.writeTo(out); StreamInput in = out.bytes().streamInput(); - stats = ShardStats.readShardStats(in); + stats = new ShardStats(in); XContentBuilder builder = jsonBuilder(); builder.startObject(); diff --git a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java index b7881adf76285..5cd5a1328695a 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java @@ -28,8 +28,8 @@ import org.apache.lucene.store.Lock; import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.store.NativeFSLockFactory; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplanation; -import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import 
org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; @@ -62,9 +62,9 @@ import org.elasticsearch.index.MockEngineFactoryPlugin; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.translog.TestTranslog; +import org.elasticsearch.index.translog.TranslogCorruptedException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.RecoveryState; -import org.elasticsearch.monitor.fs.FsInfo; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.CorruptionUtils; import org.elasticsearch.test.ESIntegTestCase; @@ -82,9 +82,10 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.TreeSet; import java.util.regex.Matcher; import java.util.regex.Pattern; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; import static org.elasticsearch.common.util.CollectionUtils.iterableAsArrayList; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; @@ -95,7 +96,9 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.startsWith; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) @@ -156,8 +159,7 @@ public void testCorruptIndex() throws Exception { containsString("is Elasticsearch still running ?"))); } - final Set<Path> indexDirs = getDirs(indexName, ShardPath.INDEX_FOLDER_NAME); - assertThat(indexDirs, hasSize(1)); + final Path indexDir = getPathToShardData(indexName, ShardPath.INDEX_FOLDER_NAME); internalCluster().restartNode(node, new InternalTestCluster.RestartCallback() { @Override @@ -170,7 +172,7 @@ public Settings onNodeStopped(String nodeName) throws Exception { assertThat(e.getMessage(), startsWith("Shard does not seem to be corrupted at")); } - CorruptionUtils.corruptIndex(random(), indexDirs.iterator().next(), false); + CorruptionUtils.corruptIndex(random(), indexDir, false); return super.onNodeStopped(nodeName); } }); @@ -266,13 +268,11 @@ public void testCorruptTranslogTruncation() throws Exception { .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "-1") .put(MockEngineSupport.DISABLE_FLUSH_ON_CLOSE.getKey(), true) // never flush - always recover from translog - .put("index.routing.allocation.exclude._name", node2) - )); + .put("index.routing.allocation.exclude._name", node2))); ensureYellow(); assertAcked(client().admin().indices().prepareUpdateSettings(indexName).setSettings(Settings.builder() - .put("index.routing.allocation.exclude._name", (String)null) - )); + .putNull("index.routing.allocation.exclude._name"))); ensureGreen(); // Index some documents @@ -294,7 +294,6 @@ public void testCorruptTranslogTruncation() throws Exception { builders[i] = client().prepareIndex(indexName, "type").setSource("foo", "bar"); } indexRandom(false, false, false, Arrays.asList(builders)); - Set<Path> translogDirs = getDirs(indexName, ShardPath.TRANSLOG_FOLDER_NAME); RemoveCorruptedShardDataCommand command = new RemoveCorruptedShardDataCommand(); MockTerminal terminal = new MockTerminal(); @@ -314,60 +313,57 @@ public void testCorruptTranslogTruncation() throws Exception { // shut down the replica node to be tested later internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node2)); - // Corrupt the 
translog file(s) - logger.info("--> corrupting translog"); - corruptRandomTranslogFiles(indexName); + final Path translogDir = getPathToShardData(indexName, ShardPath.TRANSLOG_FOLDER_NAME); + final Path indexDir = getPathToShardData(indexName, ShardPath.INDEX_FOLDER_NAME); // Restart the single node logger.info("--> restarting node"); - internalCluster().restartRandomDataNode(); + internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) throws Exception { + logger.info("--> corrupting translog on node {}", nodeName); + TestTranslog.corruptRandomTranslogFile(logger, random(), translogDir); + return super.onNodeStopped(nodeName); + } + }); // all shards should be failed due to a corrupted translog assertBusy(() -> { - final ClusterAllocationExplanation explanation = - client().admin().cluster().prepareAllocationExplain() - .setIndex(indexName).setShard(0).setPrimary(true) - .get().getExplanation(); - - final UnassignedInfo unassignedInfo = explanation.getUnassignedInfo(); + final UnassignedInfo unassignedInfo = client().admin().cluster().prepareAllocationExplain() + .setIndex(indexName).setShard(0).setPrimary(true).get().getExplanation().getUnassignedInfo(); assertThat(unassignedInfo.getReason(), equalTo(UnassignedInfo.Reason.ALLOCATION_FAILED)); + assertThat(ExceptionsHelper.unwrap(unassignedInfo.getFailure(), TranslogCorruptedException.class), not(nullValue())); }); // have to shut down primary node - otherwise node lock is present - final InternalTestCluster.RestartCallback callback = - new InternalTestCluster.RestartCallback() { - @Override - public Settings onNodeStopped(String nodeName) throws Exception { - // and we can actually truncate the translog - for (Path translogDir : translogDirs) { - final Path idxLocation = translogDir.getParent().resolve(ShardPath.INDEX_FOLDER_NAME); - assertBusy(() -> { - logger.info("--> checking that lock has been released for {}", idxLocation); - try (Directory dir = FSDirectory.open(idxLocation, NativeFSLockFactory.INSTANCE); - Lock writeLock = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) { - // Great, do nothing, we just wanted to obtain the lock - } catch (LockObtainFailedException lofe) { - logger.info("--> failed acquiring lock for {}", idxLocation); - fail("still waiting for lock release at [" + idxLocation + "]"); - } catch (IOException ioe) { - fail("Got an IOException: " + ioe); - } - }); - - final Environment environment = TestEnvironment.newEnvironment( - Settings.builder().put(internalCluster().getDefaultSettings()).put(node1PathSettings).build()); - - terminal.addTextInput("y"); - OptionSet options = parser.parse("-d", translogDir.toAbsolutePath().toString()); - logger.info("--> running command for [{}]", translogDir.toAbsolutePath()); - command.execute(terminal, options, environment); - logger.info("--> output:\n{}", terminal.getOutput()); + internalCluster().restartNode(node1, new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) throws Exception { + assertBusy(() -> { + logger.info("--> checking that lock has been released for {}", indexDir); + //noinspection EmptyTryBlock since we're just trying to obtain the lock + try (Directory dir = FSDirectory.open(indexDir, NativeFSLockFactory.INSTANCE); + Lock ignored = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) { + } catch (LockObtainFailedException lofe) { + logger.info("--> failed acquiring lock for {}", indexDir); + throw new AssertionError("still 
waiting for lock release at [" + indexDir + "]", lofe); + } catch (IOException ioe) { + throw new AssertionError("unexpected IOException [" + indexDir + "]", ioe); } + }); - return super.onNodeStopped(nodeName); - } - }; - internalCluster().restartNode(node1, callback); + final Environment environment = TestEnvironment.newEnvironment( + Settings.builder().put(internalCluster().getDefaultSettings()).put(node1PathSettings).build()); + + terminal.addTextInput("y"); + OptionSet options = parser.parse("-d", translogDir.toAbsolutePath().toString()); + logger.info("--> running command for [{}]", translogDir.toAbsolutePath()); + command.execute(terminal, options, environment); + logger.info("--> output:\n{}", terminal.getOutput()); + + return super.onNodeStopped(nodeName); + } + }); String primaryNodeId = null; final ClusterState state = client().admin().cluster().prepareState().get().getState(); @@ -477,7 +473,7 @@ public void testCorruptTranslogTruncationOfReplica() throws Exception { // sample the replica node translog dirs final ShardId shardId = new ShardId(resolveIndex(indexName), 0); - final Set translogDirs = getDirs(node2, shardId, ShardPath.TRANSLOG_FOLDER_NAME); + final Path translogDir = getPathToShardData(node2, shardId, ShardPath.TRANSLOG_FOLDER_NAME); final Settings node1PathSettings = internalCluster().dataPathSettings(node1); final Settings node2PathSettings = internalCluster().dataPathSettings(node2); @@ -488,7 +484,7 @@ public void testCorruptTranslogTruncationOfReplica() throws Exception { // Corrupt the translog file(s) on the replica logger.info("--> corrupting translog"); - TestTranslog.corruptRandomTranslogFile(logger, random(), translogDirs); + TestTranslog.corruptRandomTranslogFile(logger, random(), translogDir); // Start the node with the non-corrupted data path logger.info("--> starting node"); @@ -504,15 +500,13 @@ public void testCorruptTranslogTruncationOfReplica() throws Exception { final MockTerminal terminal = new MockTerminal(); final OptionParser parser = command.getParser(); - for (Path translogDir : translogDirs) { - final Environment environment = TestEnvironment.newEnvironment( - Settings.builder().put(internalCluster().getDefaultSettings()).put(node2PathSettings).build()); - terminal.addTextInput("y"); - OptionSet options = parser.parse("-d", translogDir.toAbsolutePath().toString()); - logger.info("--> running command for [{}]", translogDir.toAbsolutePath()); - command.execute(terminal, options, environment); - logger.info("--> output:\n{}", terminal.getOutput()); - } + final Environment environment = TestEnvironment.newEnvironment( + Settings.builder().put(internalCluster().getDefaultSettings()).put(node2PathSettings).build()); + terminal.addTextInput("y"); + OptionSet options = parser.parse("-d", translogDir.toAbsolutePath().toString()); + logger.info("--> running command for [{}]", translogDir.toAbsolutePath()); + command.execute(terminal, options, environment); + logger.info("--> output:\n{}", terminal.getOutput()); logger.info("--> starting the replica node to test recovery"); internalCluster().startNode(node2PathSettings); @@ -566,9 +560,7 @@ public void testResolvePath() throws Exception { final Map environmentByNodeName = new HashMap<>(); for (String nodeName : nodeNames) { final String nodeId = nodeNameToNodeId.get(nodeName); - final Set indexDirs = getDirs(nodeId, shardId, ShardPath.INDEX_FOLDER_NAME); - assertThat(indexDirs, hasSize(1)); - indexPathByNodeName.put(nodeName, indexDirs.iterator().next()); + indexPathByNodeName.put(nodeName, 
getPathToShardData(nodeId, shardId, ShardPath.INDEX_FOLDER_NAME)); final Environment environment = TestEnvironment.newEnvironment( Settings.builder().put(internalCluster().getDefaultSettings()).put(internalCluster().dataPathSettings(nodeName)).build()); @@ -586,7 +578,7 @@ public void testResolvePath() throws Exception { } } - private Set getDirs(String indexName, String dirSuffix) { + private Path getPathToShardData(String indexName, String dirSuffix) { ClusterState state = client().admin().cluster().prepareState().get().getState(); GroupShardsIterator shardIterators = state.getRoutingTable().activePrimaryShardsGrouped(new String[]{indexName}, false); List iterators = iterableAsArrayList(shardIterators); @@ -597,30 +589,21 @@ private Set getDirs(String indexName, String dirSuffix) { assertTrue(shardRouting.assignedToNode()); String nodeId = shardRouting.currentNodeId(); ShardId shardId = shardRouting.shardId(); - return getDirs(nodeId, shardId, dirSuffix); + return getPathToShardData(nodeId, shardId, dirSuffix); } - public static Set getDirs(String nodeId, ShardId shardId, String dirSuffix) { - final NodesStatsResponse nodeStatses = client().admin().cluster().prepareNodesStats(nodeId).setFs(true).get(); - final Set translogDirs = new TreeSet<>(); - final NodeStats nodeStats = nodeStatses.getNodes().get(0); - for (FsInfo.Path fsPath : nodeStats.getFs()) { - final String path = fsPath.getPath(); - final Path p = PathUtils.get(path) + public static Path getPathToShardData(String nodeId, ShardId shardId, String shardPathSubdirectory) { + final NodesStatsResponse nodeStatsResponse = client().admin().cluster().prepareNodesStats(nodeId).setFs(true).get(); + final Set paths = StreamSupport.stream(nodeStatsResponse.getNodes().get(0).getFs().spliterator(), false) + .map(nodePath -> PathUtils.get(nodePath.getPath()) .resolve(NodeEnvironment.INDICES_FOLDER) .resolve(shardId.getIndex().getUUID()) .resolve(Integer.toString(shardId.getId())) - .resolve(dirSuffix); - if (Files.isDirectory(p)) { - translogDirs.add(p); - } - } - return translogDirs; - } - - private void corruptRandomTranslogFiles(String indexName) throws IOException { - Set translogDirs = getDirs(indexName, ShardPath.TRANSLOG_FOLDER_NAME); - TestTranslog.corruptRandomTranslogFile(logger, random(), translogDirs); + .resolve(shardPathSubdirectory)) + .filter(Files::isDirectory) + .collect(Collectors.toSet()); + assertThat(paths, hasSize(1)); + return paths.iterator().next(); } /** Disables translog flushing for the specified index */ diff --git a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java index 4e4b1756b2cbd..ac3941f6bbc9e 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java @@ -52,7 +52,6 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.Arrays; -import java.util.Collections; import java.util.Set; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -215,7 +214,7 @@ public void testCorruptedTranslog() throws Exception { // close shard closeShards(indexShard); - TestTranslog.corruptRandomTranslogFile(logger, random(), Collections.singletonList(translogPath)); + TestTranslog.corruptRandomTranslogFile(logger, random(), translogPath); // test corrupted shard final IndexShard corruptedShard = reopenIndexShard(true); 
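// [editor's aside, not part of the patch] The restart callback above polls until the stopped
// node has released the shard's Lucene write lock before invoking the removal tool. A minimal
// sketch of that probe, assuming only the Lucene APIs already used in the test; the
// ShardLockProbe class and isWriteLockReleased method are hypothetical names for illustration.
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.NativeFSLockFactory;

import java.io.IOException;
import java.nio.file.Path;

final class ShardLockProbe {
    /**
     * Returns true if no other process holds the index write lock, i.e. the node has fully
     * shut down and it is safe to operate on this shard directory.
     */
    static boolean isWriteLockReleased(Path indexDir) throws IOException {
        try (Directory dir = FSDirectory.open(indexDir, NativeFSLockFactory.INSTANCE);
             Lock ignored = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
            return true; // we acquired (and immediately release) the lock ourselves
        } catch (LockObtainFailedException e) {
            return false; // still held elsewhere; callers retry, e.g. inside assertBusy
        }
    }
}
// Usage mirrors the loop above: assertBusy(() -> assertTrue(ShardLockProbe.isWriteLockReleased(indexDir))).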
@@ -281,7 +280,7 @@ public void testCorruptedBothIndexAndTranslog() throws Exception { expectThrows(IndexShardRecoveryException.class, () -> newStartedShard(p -> corruptedShard, true)); closeShards(corruptedShard); } - TestTranslog.corruptRandomTranslogFile(logger, random(), Collections.singletonList(translogPath)); + TestTranslog.corruptRandomTranslogFile(logger, random(), translogPath); final RemoveCorruptedShardDataCommand command = new RemoveCorruptedShardDataCommand(); final MockTerminal t = new MockTerminal(); diff --git a/server/src/test/java/org/elasticsearch/index/store/CorruptedTranslogIT.java b/server/src/test/java/org/elasticsearch/index/store/CorruptedTranslogIT.java index 7936e8efd5624..c50f8c80e72e4 100644 --- a/server/src/test/java/org/elasticsearch/index/store/CorruptedTranslogIT.java +++ b/server/src/test/java/org/elasticsearch/index/store/CorruptedTranslogIT.java @@ -19,48 +19,39 @@ package org.elasticsearch.index.store; -import com.carrotsearch.randomizedtesting.generators.RandomPicks; -import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.routing.GroupShardsIterator; -import org.elasticsearch.cluster.routing.ShardIterator; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.common.Priority; -import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.MockEngineFactoryPlugin; import org.elasticsearch.index.translog.TestTranslog; -import org.elasticsearch.monitor.fs.FsInfo; +import org.elasticsearch.index.translog.TranslogCorruptedException; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.engine.MockEngineSupport; import org.elasticsearch.test.transport.MockTransportService; -import java.io.IOException; -import java.nio.file.Files; import java.nio.file.Path; import java.util.Arrays; import java.util.Collection; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.concurrent.TimeUnit; -import static org.elasticsearch.common.util.CollectionUtils.iterableAsArrayList; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; /** * Integration test for corrupted translog files */ -@ESIntegTestCase.ClusterScope(scope= ESIntegTestCase.Scope.SUITE, numDataNodes = 0) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, numDataNodes = 0) public class CorruptedTranslogIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { @@ -68,78 +59,46 @@ protected 
Collection> nodePlugins() { } public void testCorruptTranslogFiles() throws Exception { - internalCluster().startNodes(1, Settings.EMPTY); + internalCluster().startNode(Settings.EMPTY); assertAcked(prepareCreate("test").setSettings(Settings.builder() - .put("index.number_of_shards", 1) - .put("index.number_of_replicas", 0) - .put("index.refresh_interval", "-1") - .put(MockEngineSupport.DISABLE_FLUSH_ON_CLOSE.getKey(), true) // never flush - always recover from translog - )); + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put("index.refresh_interval", "-1") + .put(MockEngineSupport.DISABLE_FLUSH_ON_CLOSE.getKey(), true) // never flush - always recover from translog + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)))); // Index some documents - int numDocs = scaledRandomIntBetween(100, 1000); - IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs]; + IndexRequestBuilder[] builders = new IndexRequestBuilder[scaledRandomIntBetween(100, 1000)]; for (int i = 0; i < builders.length; i++) { builders[i] = client().prepareIndex("test", "type").setSource("foo", "bar"); } - disableTranslogFlush("test"); - indexRandom(false, false, false, Arrays.asList(builders)); // this one - // Corrupt the translog file(s) - corruptRandomTranslogFile(); + indexRandom(false, false, false, Arrays.asList(builders)); - // Restart the single node - internalCluster().fullRestart(); - client().admin().cluster().prepareHealth().setWaitForYellowStatus(). - setTimeout(new TimeValue(1000, TimeUnit.MILLISECONDS)).setWaitForEvents(Priority.LANGUID).get(); + final Path translogPath = internalCluster().getInstance(IndicesService.class) + .indexService(resolveIndex("test")).getShard(0).shardPath().resolveTranslog(); - try { - client().prepareSearch("test").setQuery(matchAllQuery()).get(); - fail("all shards should be failed due to a corrupted translog"); - } catch (SearchPhaseExecutionException e) { - // Good, all shards should be failed because there is only a - // single shard and its translog is corrupt - } - } + internalCluster().fullRestart(new InternalTestCluster.RestartCallback(){ + @Override + public void onAllNodesStopped() throws Exception { + TestTranslog.corruptRandomTranslogFile(logger, random(), translogPath); + } + }); + assertBusy(() -> { + final ClusterAllocationExplainResponse allocationExplainResponse + = client().admin().cluster().prepareAllocationExplain().setIndex("test").setShard(0).setPrimary(true).get(); + final UnassignedInfo unassignedInfo = allocationExplainResponse.getExplanation().getUnassignedInfo(); + assertThat(unassignedInfo, not(nullValue())); + final Throwable cause = ExceptionsHelper.unwrap(unassignedInfo.getFailure(), TranslogCorruptedException.class); + assertThat(cause, not(nullValue())); + assertThat(cause.getMessage(), containsString(translogPath.toString())); + }); - private void corruptRandomTranslogFile() throws IOException { - ClusterState state = client().admin().cluster().prepareState().get().getState(); - GroupShardsIterator shardIterators = state.getRoutingTable().activePrimaryShardsGrouped(new String[]{"test"}, false); - final Index test = state.metaData().index("test").getIndex(); - List iterators = iterableAsArrayList(shardIterators); - ShardIterator shardIterator = RandomPicks.randomFrom(random(), iterators); - ShardRouting shardRouting = shardIterator.nextOrNull(); - assertNotNull(shardRouting); - assertTrue(shardRouting.primary()); - 
assertTrue(shardRouting.assignedToNode()); - String nodeId = shardRouting.currentNodeId(); - NodesStatsResponse nodeStatses = client().admin().cluster().prepareNodesStats(nodeId).setFs(true).get(); - Set translogDirs = new HashSet<>(); - for (FsInfo.Path fsPath : nodeStatses.getNodes().get(0).getFs()) { - String path = fsPath.getPath(); - String relativeDataLocationPath = "indices/" + test.getUUID() + "/" + Integer.toString(shardRouting.getId()) + "/translog"; - Path translogDir = PathUtils.get(path).resolve(relativeDataLocationPath); - if (Files.isDirectory(translogDir)) { - translogDirs.add(translogDir); - } - } - Path translogDir = RandomPicks.randomFrom(random(), translogDirs); - TestTranslog.corruptRandomTranslogFile(logger, random(), Arrays.asList(translogDir)); - } + assertThat(expectThrows(SearchPhaseExecutionException.class, () -> client().prepareSearch("test").setQuery(matchAllQuery()).get()) + .getMessage(), containsString("all shards failed")); - /** Disables translog flushing for the specified index */ - private static void disableTranslogFlush(String index) { - Settings settings = Settings.builder() - .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)).build(); - client().admin().indices().prepareUpdateSettings(index).setSettings(settings).get(); } - /** Enables translog flushing for the specified index */ - private static void enableTranslogFlush(String index) { - Settings settings = Settings.builder() - .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(512, ByteSizeUnit.MB)).build(); - client().admin().indices().prepareUpdateSettings(index).setSettings(settings).get(); - } } diff --git a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java index dc2557a1f6e13..88da2b349f71e 100644 --- a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java +++ b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java @@ -869,7 +869,7 @@ public void testStreamStoreFilesMetaData() throws Exception { InputStreamStreamInput in = new InputStreamStreamInput(inBuffer); in.setVersion(targetNodeVersion); TransportNodesListShardStoreMetaData.StoreFilesMetaData inStoreFileMetaData = - TransportNodesListShardStoreMetaData.StoreFilesMetaData.readStoreFilesMetaData(in); + new TransportNodesListShardStoreMetaData.StoreFilesMetaData(in); Iterator outFiles = outStoreFileMetaData.iterator(); for (StoreFileMetaData inFile : inStoreFileMetaData) { assertThat(inFile.name(), equalTo(outFiles.next().name())); diff --git a/server/src/test/java/org/elasticsearch/index/translog/TestTranslog.java b/server/src/test/java/org/elasticsearch/index/translog/TestTranslog.java index af1693a45bf9b..f80d0db880057 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TestTranslog.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TestTranslog.java @@ -25,6 +25,8 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.store.NIOFSDirectory; +import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.engine.CombinedDeletionPolicy; import java.io.IOException; @@ -35,7 +37,6 @@ import java.nio.file.Path; import java.nio.file.StandardOpenOption; import java.util.ArrayList; -import java.util.Collection; import java.util.Comparator; import java.util.Iterator; import 
java.util.List; @@ -45,6 +46,7 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; +import static org.elasticsearch.index.translog.Translog.CHECKPOINT_FILE_NAME; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -56,41 +58,94 @@ * Helpers for testing translog. */ public class TestTranslog { - private static final Pattern TRANSLOG_FILE_PATTERN = Pattern.compile("translog-(\\d+)\\.tlog"); + private static final Pattern TRANSLOG_FILE_PATTERN = Pattern.compile("^translog-(\\d+)\\.(tlog|ckp)$"); - public static void corruptRandomTranslogFile(Logger logger, Random random, Collection translogDirs) throws IOException { - for (Path translogDir : translogDirs) { - final long minTranslogGen = minTranslogGenUsedInRecovery(translogDir); - corruptRandomTranslogFile(logger, random, translogDir, minTranslogGen); - } + /** + * Corrupts random translog file (translog-N.tlog or translog-N.ckp or translog.ckp) from the given translog directory, ignoring + * translogs and checkpoints with generations below the generation recorded in the latest index commit found in translogDir/../index/, + * or writes a corrupted translog-N.ckp file as if from a crash while rolling a generation. + * + *
+ * <p>
+ * See {@link TestTranslog#corruptFile(Logger, Random, Path, boolean)} for details of the corruption applied. + */ + public static void corruptRandomTranslogFile(Logger logger, Random random, Path translogDir) throws IOException { + corruptRandomTranslogFile(logger, random, translogDir, minTranslogGenUsedInRecovery(translogDir)); } /** - * Corrupts random translog file (translog-N.tlog) from the given translog directory. + * Corrupts random translog file (translog-N.tlog or translog-N.ckp or translog.ckp) from the given translog directory, or writes a + * corrupted translog-N.ckp file as if from a crash while rolling a generation. + *
+ * <p>
+ * See {@link TestTranslog#corruptFile(Logger, Random, Path, boolean)} for details of the corruption applied. + * + * @param minGeneration the minimum generation (N) to corrupt. Translogs and checkpoints with lower generation numbers are ignored. */ - public static void corruptRandomTranslogFile(Logger logger, Random random, Path translogDir, long minGeneration) - throws IOException { - Set candidates = new TreeSet<>(); // TreeSet makes sure iteration order is deterministic + static void corruptRandomTranslogFile(Logger logger, Random random, Path translogDir, long minGeneration) throws IOException { logger.info("--> corruptRandomTranslogFile: translogDir [{}], minUsedTranslogGen [{}]", translogDir, minGeneration); + + Path unnecessaryCheckpointCopyPath = null; + try { + final Path checkpointPath = translogDir.resolve(CHECKPOINT_FILE_NAME); + final Checkpoint checkpoint = Checkpoint.read(checkpointPath); + unnecessaryCheckpointCopyPath = translogDir.resolve(Translog.getCommitCheckpointFileName(checkpoint.generation)); + if (LuceneTestCase.rarely(random) && Files.exists(unnecessaryCheckpointCopyPath) == false) { + // if we crashed while rolling a generation then we might have copied `translog.ckp` to its numbered generation file but + // have not yet written a new `translog.ckp`. During recovery we must also verify that this file is intact, so it's ok to + // corrupt this file too (either by writing the wrong information, correctly formatted, or by properly corrupting it) + final Checkpoint checkpointCopy = LuceneTestCase.usually(random) ? checkpoint + : new Checkpoint(checkpoint.offset + random.nextInt(2), checkpoint.numOps + random.nextInt(2), + checkpoint.generation + random.nextInt(2), checkpoint.minSeqNo + random.nextInt(2), + checkpoint.maxSeqNo + random.nextInt(2), checkpoint.globalCheckpoint + random.nextInt(2), + checkpoint.minTranslogGeneration + random.nextInt(2), checkpoint.trimmedAboveSeqNo + random.nextInt(2)); + Checkpoint.write(FileChannel::open, unnecessaryCheckpointCopyPath, checkpointCopy, + StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW); + + if (checkpointCopy.equals(checkpoint) == false) { + logger.info("corruptRandomTranslogFile: created [{}] containing [{}] instead of [{}]", unnecessaryCheckpointCopyPath, + checkpointCopy, checkpoint); + return; + } // else checkpoint copy has the correct content so it's now a candidate for the usual kinds of corruption + } + } catch (TranslogCorruptedException e) { + // missing or corrupt checkpoint already, find something else to break... 
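// [editor's aside, not part of the patch] Reaching this catch means the current translog.ckp
// could not even be read, so no plausible translog-N.ckp copy can be fabricated here; control
// falls through to the generic candidate selection below, which picks a nonempty
// translog-N.tlog / translog-N.ckp / translog.ckp file to corrupt instead.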
+ } + + Set candidates = new TreeSet<>(); // TreeSet makes sure iteration order is deterministic try (DirectoryStream stream = Files.newDirectoryStream(translogDir)) { for (Path item : stream) { - if (Files.isRegularFile(item)) { - final Matcher matcher = TRANSLOG_FILE_PATTERN.matcher(item.getFileName().toString()); - if (matcher.matches() && Long.parseLong(matcher.group(1)) >= minGeneration) { + if (Files.isRegularFile(item) && Files.size(item) > 0) { + final String filename = item.getFileName().toString(); + final Matcher matcher = TRANSLOG_FILE_PATTERN.matcher(filename); + if (filename.equals("translog.ckp") || (matcher.matches() && Long.parseLong(matcher.group(1)) >= minGeneration)) { candidates.add(item); } } } } - assertThat("no translog files found in " + translogDir, candidates, is(not(empty()))); + assertThat("no corruption candidates found in " + translogDir, candidates, is(not(empty()))); + + final Path fileToCorrupt = RandomPicks.randomFrom(random, candidates); - Path corruptedFile = RandomPicks.randomFrom(random, candidates); - corruptFile(logger, random, corruptedFile); + // deleting the unnecessary checkpoint file doesn't count as a corruption + final boolean maybeDelete = fileToCorrupt.equals(unnecessaryCheckpointCopyPath) == false; + + corruptFile(logger, random, fileToCorrupt, maybeDelete); } - static void corruptFile(Logger logger, Random random, Path fileToCorrupt) throws IOException { + /** + * Corrupt an (existing and nonempty) file by replacing any byte in the file with a random (different) byte, or by truncating the file + * to a random (strictly shorter) length, or by deleting the file. + */ + static void corruptFile(Logger logger, Random random, Path fileToCorrupt, boolean maybeDelete) throws IOException { + assertThat(fileToCorrupt + " should be a regular file", Files.isRegularFile(fileToCorrupt)); final long fileSize = Files.size(fileToCorrupt); - assertThat("cannot corrupt empty file " + fileToCorrupt, fileSize, greaterThan(0L)); + assertThat(fileToCorrupt + " should not be an empty file", fileSize, greaterThan(0L)); + + if (maybeDelete && random.nextBoolean() && random.nextBoolean()) { + logger.info("corruptFile: deleting file {}", fileToCorrupt); + IOUtils.rm(fileToCorrupt); + return; + } try (FileChannel fileChannel = FileChannel.open(fileToCorrupt, StandardOpenOption.READ, StandardOpenOption.WRITE)) { final long corruptPosition = RandomNumbers.randomLongBetween(random, 0, fileSize - 1); @@ -114,10 +169,10 @@ static void corruptFile(Logger logger, Random random, Path fileToCorrupt) throws // rewrite fileChannel.position(corruptPosition); fileChannel.write(bb); - logger.info("--> corrupting file {} at position {} turning 0x{} into 0x{}", fileToCorrupt, corruptPosition, + logger.info("corruptFile: corrupting file {} at position {} turning 0x{} into 0x{}", fileToCorrupt, corruptPosition, Integer.toHexString(oldValue & 0xff), Integer.toHexString(newValue & 0xff)); } else { - logger.info("--> truncating file {} from length {} to length {}", fileToCorrupt, fileSize, corruptPosition); + logger.info("corruptFile: truncating file {} from length {} to length {}", fileToCorrupt, fileSize, corruptPosition); fileChannel.truncate(corruptPosition); } } diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java index 90960693f8af7..c41286275ca53 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java +++ 
b/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java @@ -57,7 +57,7 @@ public void testCurrentHeaderVersion() throws Exception { assertThat(mismatchUUID.getMessage(), containsString("this translog file belongs to a different translog")); int corruptions = between(1, 10); for (int i = 0; i < corruptions && Files.size(translogFile) > 0; i++) { - TestTranslog.corruptFile(logger, random(), translogFile); + TestTranslog.corruptFile(logger, random(), translogFile, false); } expectThrows(TranslogCorruptedException.class, () -> { try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) { diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index c99fee9dcb8a7..6d00c72ddf9ef 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -20,12 +20,12 @@ package org.elasticsearch.index.translog; import com.carrotsearch.randomizedtesting.generators.RandomPicks; - import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.document.Field; import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.TextField; +import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.index.Term; import org.apache.lucene.mockfile.FilterFileChannel; import org.apache.lucene.mockfile.FilterFileSystemProvider; @@ -84,6 +84,7 @@ import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.nio.file.CopyOption; import java.nio.file.FileAlreadyExistsException; import java.nio.file.Files; @@ -125,6 +126,7 @@ import static org.elasticsearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -149,6 +151,12 @@ public class TranslogTests extends ESTestCase { // A default primary term is used by translog instances created in this test. 
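// [editor's aside, not part of the patch, placed before the TranslogTests fixture fields below]
// A self-contained sketch of the corruption modes implemented by TestTranslog.corruptFile in the
// hunks above: flip one byte to a guaranteed-different value, truncate to a strictly shorter
// length, or (when permitted) delete the file outright. All names in this sketch are hypothetical.
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.Random;

final class CorruptionSketch {
    static void corrupt(Random random, Path file, boolean maybeDelete) throws IOException {
        final long fileSize = Files.size(file);
        if (fileSize == 0) {
            throw new IllegalArgumentException("cannot corrupt empty file " + file);
        }
        if (maybeDelete && random.nextInt(4) == 0) {
            Files.delete(file); // deleting the file entirely is also a valid corruption
            return;
        }
        try (FileChannel channel = FileChannel.open(file, StandardOpenOption.READ, StandardOpenOption.WRITE)) {
            final long position = Math.floorMod(random.nextLong(), fileSize);
            if (random.nextBoolean()) {
                // flip a single byte, XOR-ing with a nonzero value so it is guaranteed to change
                final ByteBuffer bb = ByteBuffer.allocate(1);
                channel.read(bb, position);
                final byte oldValue = bb.get(0);
                bb.put(0, (byte) (oldValue ^ (1 + random.nextInt(255))));
                bb.rewind();
                channel.write(bb, position);
            } else {
                // truncating at 'position' always yields a strictly shorter file
                channel.truncate(position);
            }
        }
    }
}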
private final AtomicLong primaryTerm = new AtomicLong(); private final AtomicReference persistedSeqNoConsumer = new AtomicReference<>(); + private boolean expectIntactTranslog; + + @Before + public void expectIntactTranslogByDefault() { + expectIntactTranslog = true; + } @Override protected void afterIfSuccessful() throws Exception { @@ -162,7 +170,9 @@ protected void afterIfSuccessful() throws Exception { } translog.close(); } - assertFileIsPresent(translog, translog.currentFileGeneration()); + if (expectIntactTranslog) { + assertFileIsPresent(translog, translog.currentFileGeneration()); + } IOUtils.rm(translog.location()); // delete all the locations } @@ -850,30 +860,31 @@ public void testTranslogCorruption() throws Exception { String uuid = translog.getTranslogUUID(); List locations = new ArrayList<>(); - int translogOperations = randomIntBetween(10, 100); + int translogOperations = randomIntBetween(10, 1000); for (int op = 0; op < translogOperations; op++) { String ascii = randomAlphaOfLengthBetween(1, 50); locations.add( translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), ascii.getBytes("UTF-8"))) ); + + if (rarely()) { + translog.rollGeneration(); + } } translog.close(); TestTranslog.corruptRandomTranslogFile(logger, random(), translogDir, 0); - int corruptionsCaught = 0; - try (Translog translog = openTranslog(config, uuid)) { - try (Translog.Snapshot snapshot = translog.newSnapshot()) { + assertThat(expectThrows(TranslogCorruptedException.class, () -> { + try (Translog translog = openTranslog(config, uuid); + Translog.Snapshot snapshot = translog.newSnapshot()) { for (int i = 0; i < locations.size(); i++) { snapshot.next(); } } - } catch (TranslogCorruptedException e) { - assertThat(e.getMessage(), containsString(translogDir.toString())); - corruptionsCaught++; - } + }).getMessage(), containsString(translogDir.toString())); - assertThat("corruption is caught", corruptionsCaught, greaterThanOrEqualTo(1)); + expectIntactTranslog = false; } public void testTruncatedTranslogs() throws Exception { @@ -1561,14 +1572,12 @@ public void testRecoveryUncommittedFileExists() throws IOException { } public void testRecoveryUncommittedCorruptedCheckpoint() throws IOException { - List locations = new ArrayList<>(); int translogOperations = 100; final int prepareOp = 44; Translog.TranslogGeneration translogGeneration = null; final boolean sync = randomBoolean(); for (int op = 0; op < translogOperations; op++) { - locations.add(translog.add(new Translog.Index("test", "" + op, op, - primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); + translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(StandardCharsets.UTF_8))); if (op == prepareOp) { translogGeneration = translog.getGeneration(); translog.rollGeneration(); @@ -1589,15 +1598,13 @@ public void testRecoveryUncommittedCorruptedCheckpoint() throws IOException { corrupted, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW); final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); - try (Translog ignored = new Translog(config, translogUUID, deletionPolicy, - () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {})) { - fail("corrupted"); - } catch (IllegalStateException ex) { - assertEquals("Checkpoint file translog-3.ckp already exists but has corrupted content expected: Checkpoint{offset=3025, " + + final TranslogCorruptedException translogCorruptedException = 
expectThrows(TranslogCorruptedException.class, () -> + new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> { })); + assertThat(translogCorruptedException.getMessage(), endsWith( + "] is corrupted, checkpoint file translog-3.ckp already exists but has corrupted content: expected Checkpoint{offset=3025, " + "numOps=55, generation=3, minSeqNo=45, maxSeqNo=99, globalCheckpoint=-1, minTranslogGeneration=1, trimmedAboveSeqNo=-2} " + - "but got: Checkpoint{offset=0, numOps=0, generation=0, minSeqNo=-1, maxSeqNo=-1, globalCheckpoint=-1, " + - "minTranslogGeneration=0, trimmedAboveSeqNo=-2}", ex.getMessage()); - } + "but got Checkpoint{offset=0, numOps=0, generation=0, minSeqNo=-1, maxSeqNo=-1, globalCheckpoint=-1, " + + "minTranslogGeneration=0, trimmedAboveSeqNo=-2}")); Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)), read, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING); try (Translog translog = new Translog(config, translogUUID, deletionPolicy, @@ -2785,6 +2792,14 @@ public void testCheckpointOnDiskFull() throws IOException { assertEquals(read, checkpoint); } + public void testLegacyCheckpointVersion() { + expectThrows( + TranslogCorruptedException.class, + IndexFormatTooOldException.class, + () -> Checkpoint.read(getDataPath("/org/elasticsearch/index/checkpoint/v2.ckp.binary")) + ); + } + /** * Tests that closing views after the translog is fine and we can reopen the translog */ diff --git a/server/src/test/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java b/server/src/test/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java index fd96179a4c38b..f1b25e44c9f97 100644 --- a/server/src/test/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java +++ b/server/src/test/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingNodes; +import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; @@ -81,8 +82,8 @@ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception { // first wait for 2 nodes in the cluster logger.info("Waiting for replicas to be assigned"); - ClusterHealthResponse clusterHealth = - client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2")).actionGet(); + ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth() + .setWaitForGreenStatus().setWaitForNodes("2").setWaitForNoRelocatingShards(true).setWaitForEvents(Priority.LANGUID).get(); logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -118,7 +119,8 @@ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception { // first wait for 3 nodes in the cluster logger.info("Waiting for replicas to be assigned"); - clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("3")).actionGet(); + clusterHealth = client().admin().cluster().prepareHealth() + 
.setWaitForGreenStatus().setWaitForNodes("3").setWaitForNoRelocatingShards(true).setWaitForEvents(Priority.LANGUID).get(); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -129,8 +131,8 @@ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception { // explicitly call reroute, so shards will get relocated to the new node (we delay it in ES in case other nodes join) client().admin().cluster().prepareReroute().execute().actionGet(); - clusterHealth = client().admin().cluster().health( - clusterHealthRequest().waitForGreenStatus().waitForNodes("3").waitForNoRelocatingShards(true)).actionGet(); + clusterHealth = client().admin().cluster().prepareHealth() + .setWaitForGreenStatus().setWaitForNodes("3").setWaitForNoRelocatingShards(true).setWaitForEvents(Priority.LANGUID).get(); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(clusterHealth.getNumberOfDataNodes(), equalTo(3)); @@ -166,15 +168,16 @@ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception { internalCluster().stopRandomNode(InternalTestCluster.nameFilter(server_1)); // verify health logger.info("Running Cluster Health"); - clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2")).actionGet(); + clusterHealth = client().admin().cluster().prepareHealth() + .setWaitForGreenStatus().setWaitForNodes("2").setWaitForNoRelocatingShards(true).setWaitForEvents(Priority.LANGUID).get(); logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); client().admin().cluster().prepareReroute().get(); - clusterHealth = client().admin().cluster().health( - clusterHealthRequest().waitForGreenStatus().waitForNoRelocatingShards(true).waitForNodes("2")).actionGet(); + clusterHealth = client().admin().cluster().prepareHealth() + .setWaitForGreenStatus().setWaitForNodes("2").setWaitForNoRelocatingShards(true).setWaitForEvents(Priority.LANGUID).get(); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(clusterHealth.getRelocatingShards(), equalTo(0)); diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java index a8d3b09c9e0c5..e7fc6ba5f9609 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java @@ -452,8 +452,8 @@ public void testStandAloneMapperServiceWithPlugins() throws IOException { .numberOfReplicas(0) .build(); MapperService mapperService = indicesService.createIndexMapperService(indexMetaData); - assertNotNull(mapperService.documentMapperParser().parserContext("type").typeParser("fake-mapper")); - Similarity sim = mapperService.documentMapperParser().parserContext("type").getSimilarity("test").get(); + assertNotNull(mapperService.documentMapperParser().parserContext().typeParser("fake-mapper")); + Similarity sim = mapperService.documentMapperParser().parserContext().getSimilarity("test").get(); assertThat(sim, instanceOf(NonNegativeScoresSimilarity.class)); sim = ((NonNegativeScoresSimilarity) sim).getDelegate(); assertThat(sim, 
instanceOf(BM25Similarity.class)); diff --git a/server/src/test/java/org/elasticsearch/indices/NodeIndicesStatsTests.java b/server/src/test/java/org/elasticsearch/indices/NodeIndicesStatsTests.java index b8ae55a44d28e..dd6b837cfaa64 100644 --- a/server/src/test/java/org/elasticsearch/indices/NodeIndicesStatsTests.java +++ b/server/src/test/java/org/elasticsearch/indices/NodeIndicesStatsTests.java @@ -30,7 +30,7 @@ public class NodeIndicesStatsTests extends ESTestCase { public void testInvalidLevel() { - final NodeIndicesStats stats = new NodeIndicesStats(); + final NodeIndicesStats stats = new NodeIndicesStats(null, Collections.emptyMap()); final String level = randomAlphaOfLength(16); final ToXContent.Params params = new ToXContent.MapParams(Collections.singletonMap("level", level)); final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> stats.toXContent(null, params)); diff --git a/server/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java b/server/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java index e5d0a5643f0e4..50aa6df7f736f 100644 --- a/server/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java +++ b/server/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.MockKeywordPlugin; import org.hamcrest.core.IsNull; @@ -41,6 +42,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.startsWith; +@ClusterScope(minNumDataNodes = 2) public class AnalyzeActionIT extends ESIntegTestCase { @Override @@ -387,5 +389,16 @@ public void testAnalyzeNormalizedKeywordField() throws IOException { assertThat(token.getPositionLength(), equalTo(1)); } + /** + * Input text that doesn't produce tokens should return an empty token list + */ + public void testZeroTokenAnalysis() throws IOException { + assertAcked(prepareCreate("test")); + ensureGreen("test"); + + AnalyzeAction.Response analyzeResponse = client().admin().indices().prepareAnalyze("test", ".").get(); + assertNotNull(analyzeResponse.getTokens()); + assertThat(analyzeResponse.getTokens().size(), equalTo(0)); + } } diff --git a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java index a4411930f92e4..9918b0c6e6188 100644 --- a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java @@ -246,6 +246,10 @@ long currentMemoryUsage() { assertThat(exception.getMessage(), containsString("real usage: [181/181b], new bytes reserved: [" + (reservationInBytes * 2) + "/" + new ByteSizeValue(reservationInBytes * 2) + "]")); + final long requestCircuitBreakerUsed = (requestBreaker.getUsed() + reservationInBytes) * 2; + assertThat(exception.getMessage(), + containsString("usages [request=" + requestCircuitBreakerUsed + "/" + new ByteSizeValue(requestCircuitBreakerUsed) + + ", fielddata=0/0b, in_flight_requests=0/0b, accounting=0/0b]")); assertThat(exception.getDurability(), equalTo(CircuitBreaker.Durability.TRANSIENT)); assertEquals(0, requestBreaker.getTrippedCount()); assertEquals(1, 
service.stats().getStats(CircuitBreaker.PARENT).getTrippedCount()); diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index 80aaae2049fd5..273a956170354 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -24,6 +24,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest; import org.elasticsearch.action.admin.cluster.reroute.TransportClusterRerouteAction; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; @@ -41,9 +42,11 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DestructiveOperations; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.action.support.master.TransportMasterNodeActionUtils; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateTaskExecutor.ClusterTasksResult; @@ -143,8 +146,10 @@ public ClusterStateChanges(NamedXContentRegistry xContentRegistry, ThreadPool th new RandomAllocationDeciderTests.RandomAllocationDecider(getRandom())))), new TestGatewayAllocator(), new BalancedShardsAllocator(SETTINGS), EmptyClusterInfoService.INSTANCE); - shardFailedClusterStateTaskExecutor = new ShardStateAction.ShardFailedClusterStateTaskExecutor(allocationService, null, logger); - shardStartedClusterStateTaskExecutor = new ShardStateAction.ShardStartedClusterStateTaskExecutor(allocationService, logger); + shardFailedClusterStateTaskExecutor + = new ShardStateAction.ShardFailedClusterStateTaskExecutor(allocationService, null, logger); + shardStartedClusterStateTaskExecutor + = new ShardStateAction.ShardStartedClusterStateTaskExecutor(allocationService, null, logger); ActionFilters actionFilters = new ActionFilters(Collections.emptySet()); IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); DestructiveOperations destructiveOperations = new DestructiveOperations(SETTINGS, clusterSettings); @@ -187,11 +192,14 @@ public IndexMetaData upgradeIndexMetaData(IndexMetaData indexMetaData, Version m return indexMetaData; } }; + NodeClient client = new NodeClient(Settings.EMPTY, threadPool); + Map actions = new HashMap<>(); + actions.put(TransportVerifyShardBeforeCloseAction.TYPE, new TransportVerifyShardBeforeCloseAction(SETTINGS, + transportService, clusterService, indicesService, threadPool, null, actionFilters, indexNameExpressionResolver)); + client.initialize(actions, null, null); - TransportVerifyShardBeforeCloseAction transportVerifyShardBeforeCloseAction = new TransportVerifyShardBeforeCloseAction(SETTINGS, - transportService, clusterService, indicesService, threadPool, null, actionFilters, indexNameExpressionResolver); MetaDataIndexStateService indexStateService = new MetaDataIndexStateService(clusterService, allocationService, - metaDataIndexUpgradeService, 
indicesService, threadPool, transportVerifyShardBeforeCloseAction); + metaDataIndexUpgradeService, indicesService, threadPool, client); MetaDataDeleteIndexService deleteIndexService = new MetaDataDeleteIndexService(SETTINGS, clusterService, allocationService); MetaDataUpdateSettingsService metaDataUpdateSettingsService = new MetaDataUpdateSettingsService(clusterService, allocationService, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, indicesService, threadPool); @@ -213,7 +221,7 @@ allocationService, new AliasValidator(), environment, transportService, clusterService, threadPool, createIndexService, actionFilters, indexNameExpressionResolver); nodeRemovalExecutor = new NodeRemovalClusterStateTaskExecutor(allocationService, logger); - joinTaskExecutor = new JoinTaskExecutor(allocationService, logger, (s, r) -> {}); + joinTaskExecutor = new JoinTaskExecutor(allocationService, logger, (s, p, r) -> {}); } public ClusterState createIndex(ClusterState state, CreateIndexRequest request) { diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 18aef3720c31a..2933eba1659bd 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -71,7 +71,6 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.InternalTestCluster; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.store.MockFSIndexStore; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.test.transport.StubbableTransport; @@ -291,12 +290,6 @@ public void testReplicaRecovery() throws Exception { assertHitCount(client().prepareSearch(INDEX_NAME).setSize(0).get(), numOfDocs); } - @TestLogging( - "_root:DEBUG," - + "org.elasticsearch.cluster.service:TRACE," - + "org.elasticsearch.indices.cluster:TRACE," - + "org.elasticsearch.indices.recovery:TRACE," - + "org.elasticsearch.index.shard:TRACE") public void testRerouteRecovery() throws Exception { logger.info("--> start node A"); final String nodeA = internalCluster().startNode(); @@ -713,7 +706,6 @@ public void sendRequest(Transport.Connection connection, long requestId, String * Tests scenario where recovery target successfully sends recovery request to source but then the channel gets closed while * the source is working on the recovery process. 
*/ - @TestLogging("_root:DEBUG,org.elasticsearch.indices.recovery:TRACE") public void testDisconnectsDuringRecovery() throws Exception { boolean primaryRelocation = randomBoolean(); final String indexName = "test"; @@ -1015,4 +1007,5 @@ public TokenStream create(TokenStream tokenStream) { }); } } + } diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index 215bf475a0c9b..f6e1de0233bf7 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -32,11 +32,12 @@ import org.apache.lucene.store.BaseDirectoryWrapper; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.BytesRefIterator; +import org.apache.lucene.util.SetOnce; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.LatchedActionListener; +import org.elasticsearch.action.StepListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -77,6 +78,7 @@ import org.elasticsearch.test.DummyShardLock; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.threadpool.FixedExecutorBuilder; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.junit.After; @@ -93,10 +95,11 @@ import java.util.List; import java.util.Map; import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.atomic.AtomicReference; import java.util.function.IntSupplier; import java.util.zip.CRC32; @@ -105,7 +108,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.core.IsNull.notNullValue; +import static org.hamcrest.Matchers.instanceOf; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyBoolean; import static org.mockito.Matchers.anyObject; @@ -121,10 +124,19 @@ public class RecoverySourceHandlerTests extends ESTestCase { private final ClusterSettings service = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); private ThreadPool threadPool; + private Executor recoveryExecutor; @Before public void setUpThreadPool() { - threadPool = new TestThreadPool(getTestName()); + if (randomBoolean()) { + threadPool = new TestThreadPool(getTestName()); + recoveryExecutor = threadPool.generic(); + } else { + // verify that both sending and receiving files can be completed with a single thread + threadPool = new TestThreadPool(getTestName(), + new FixedExecutorBuilder(Settings.EMPTY, "recovery_executor", between(1, 16), between(16, 128), "recovery_executor")); + recoveryExecutor = threadPool.executor("recovery_executor"); + } } @After @@ -133,9 +145,7 @@ public void tearDownThreadPool() { } public void testSendFiles() throws Throwable { - Settings 
settings = Settings.builder().put("indices.recovery.concurrent_streams", 1). - put("indices.recovery.concurrent_small_file_streams", 1).build(); - final RecoverySettings recoverySettings = new RecoverySettings(settings, service); + final RecoverySettings recoverySettings = new RecoverySettings(Settings.EMPTY, service); final StartRecoveryRequest request = getStartRecoveryRequest(); Store store = newStore(createTempDir()); Directory dir = store.directory(); @@ -156,38 +166,22 @@ public void testSendFiles() throws Throwable { metas.add(md); } Store targetStore = newStore(createTempDir()); + MultiFileWriter multiFileWriter = new MultiFileWriter(targetStore, mock(RecoveryState.Index.class), "", logger, () -> {}); RecoveryTargetHandler target = new TestRecoveryTargetHandler() { - IndexOutputOutputStream out; @Override public void writeFileChunk(StoreFileMetaData md, long position, BytesReference content, boolean lastChunk, int totalTranslogOps, ActionListener listener) { - try { - if (position == 0) { - out = new IndexOutputOutputStream(targetStore.createVerifyingOutput(md.name(), md, IOContext.DEFAULT)) { - @Override - public void close() throws IOException { - super.close(); - targetStore.directory().sync(Collections.singleton(md.name())); // sync otherwise MDW will mess with it - } - }; - } - final BytesRefIterator iterator = content.iterator(); - BytesRef scratch; - while ((scratch = iterator.next()) != null) { - out.write(scratch.bytes, scratch.offset, scratch.length); - } - if (lastChunk) { - out.close(); - } - listener.onResponse(null); - } catch (Exception e) { - listener.onFailure(e); - } + ActionListener.completeWith(listener, () -> { + multiFileWriter.writeFileChunk(md, position, content, lastChunk); + return null; + }); } }; - RecoverySourceHandler handler = new RecoverySourceHandler(null, target, request, - Math.toIntExact(recoverySettings.getChunkSize().getBytes()), between(1, 5)); - handler.sendFiles(store, metas.toArray(new StoreFileMetaData[0]), () -> 0); + RecoverySourceHandler handler = new RecoverySourceHandler(null, new AsyncRecoveryTarget(target, recoveryExecutor), + threadPool, request, Math.toIntExact(recoverySettings.getChunkSize().getBytes()), between(1, 5)); + PlainActionFuture sendFilesFuture = new PlainActionFuture<>(); + handler.sendFiles(store, metas.toArray(new StoreFileMetaData[0]), () -> 0, sendFilesFuture); + sendFilesFuture.actionGet(); Store.MetadataSnapshot targetStoreMetadata = targetStore.getMetadata(null); Store.RecoveryDiff recoveryDiff = targetStoreMetadata.recoveryDiff(metadata); assertEquals(metas.size(), recoveryDiff.identical.size()); @@ -195,7 +189,7 @@ public void close() throws IOException { assertEquals(0, recoveryDiff.missing.size()); IndexReader reader = DirectoryReader.open(targetStore.directory()); assertEquals(numDocs, reader.maxDoc()); - IOUtils.close(reader, store, targetStore); + IOUtils.close(reader, store, multiFileWriter, targetStore); } public StartRecoveryRequest getStartRecoveryRequest() throws IOException { @@ -241,10 +235,11 @@ public void indexTranslogOperations(List operations, int tot RetentionLeases retentionLeases, long mappingVersion, ActionListener listener) { shippedOps.addAll(operations); checkpointOnTarget.set(randomLongBetween(checkpointOnTarget.get(), Long.MAX_VALUE)); - listener.onResponse(checkpointOnTarget.get()); } + listener.onResponse(checkpointOnTarget.get()); + } }; - RecoverySourceHandler handler = new RecoverySourceHandler( - shard, new AsyncRecoveryTarget(recoveryTarget, threadPool.generic()), request, 
fileChunkSizeInBytes, between(1, 10)); + RecoverySourceHandler handler = new RecoverySourceHandler(shard, new AsyncRecoveryTarget(recoveryTarget, threadPool.generic()), + threadPool, request, fileChunkSizeInBytes, between(1, 10)); PlainActionFuture future = new PlainActionFuture<>(); handler.phase2(startingSeqNo, endingSeqNo, newTranslogSnapshot(operations, Collections.emptyList()), randomNonNegativeLong(), randomNonNegativeLong(), RetentionLeases.EMPTY, randomNonNegativeLong(), future); @@ -283,8 +278,8 @@ public void indexTranslogOperations(List operations, int tot } } }; - RecoverySourceHandler handler = new RecoverySourceHandler( - shard, new AsyncRecoveryTarget(recoveryTarget, threadPool.generic()), request, fileChunkSizeInBytes, between(1, 10)); + RecoverySourceHandler handler = new RecoverySourceHandler(shard, new AsyncRecoveryTarget(recoveryTarget, threadPool.generic()), + threadPool, request, fileChunkSizeInBytes, between(1, 10)); PlainActionFuture future = new PlainActionFuture<>(); final long startingSeqNo = randomLongBetween(0, ops.size() - 1L); final long endingSeqNo = randomLongBetween(startingSeqNo, ops.size() - 1L); @@ -343,52 +338,36 @@ public void testHandleCorruptedIndexOnSendSendFiles() throws Throwable { (p.getFileName().toString().equals("write.lock") || p.getFileName().toString().startsWith("extra")) == false)); Store targetStore = newStore(createTempDir(), false); + MultiFileWriter multiFileWriter = new MultiFileWriter(targetStore, mock(RecoveryState.Index.class), "", logger, () -> {}); RecoveryTargetHandler target = new TestRecoveryTargetHandler() { - IndexOutputOutputStream out; @Override public void writeFileChunk(StoreFileMetaData md, long position, BytesReference content, boolean lastChunk, int totalTranslogOps, ActionListener listener) { - try { - if (position == 0) { - out = new IndexOutputOutputStream(targetStore.createVerifyingOutput(md.name(), md, IOContext.DEFAULT)) { - @Override - public void close() throws IOException { - super.close(); - targetStore.directory().sync(Collections.singleton(md.name())); // sync otherwise MDW will mess with it - } - }; - } - final BytesRefIterator iterator = content.iterator(); - BytesRef scratch; - while ((scratch = iterator.next()) != null) { - out.write(scratch.bytes, scratch.offset, scratch.length); - } - if (lastChunk) { - out.close(); - } - listener.onResponse(null); - } catch (Exception e) { - IOUtils.closeWhileHandlingException(out, () -> listener.onFailure(e)); - } + ActionListener.completeWith(listener, () -> { + multiFileWriter.writeFileChunk(md, position, content, lastChunk); + return null; + }); } }; - RecoverySourceHandler handler = new RecoverySourceHandler(null, target, request, - Math.toIntExact(recoverySettings.getChunkSize().getBytes()), between(1, 8)) { + RecoverySourceHandler handler = new RecoverySourceHandler(null, new AsyncRecoveryTarget(target, recoveryExecutor), threadPool, + request, Math.toIntExact(recoverySettings.getChunkSize().getBytes()), between(1, 8)) { @Override protected void failEngine(IOException cause) { assertFalse(failedEngine.get()); failedEngine.set(true); } }; - - try { - handler.sendFiles(store, metas.toArray(new StoreFileMetaData[0]), () -> 0); - fail("corrupted index"); - } catch (IOException ex) { - assertNotNull(ExceptionsHelper.unwrapCorruption(ex)); - } + SetOnce sendFilesError = new SetOnce<>(); + CountDownLatch latch = new CountDownLatch(1); + handler.sendFiles(store, metas.toArray(new StoreFileMetaData[0]), () -> 0, + new LatchedActionListener<>(ActionListener.wrap(r -> 
sendFilesError.set(null), e -> sendFilesError.set(e)), latch)); + latch.await(); + assertThat(sendFilesError.get(), instanceOf(IOException.class)); + assertNotNull(ExceptionsHelper.unwrapCorruption(sendFilesError.get())); assertTrue(failedEngine.get()); - IOUtils.close(store, targetStore); + // ensure all chunk requests have been completed; otherwise some files on the target are left open. + IOUtils.close(() -> terminate(threadPool), () -> threadPool = null); + IOUtils.close(store, multiFileWriter, targetStore); } @@ -427,28 +406,24 @@ public void writeFileChunk(StoreFileMetaData md, long position, BytesReference c } } }; - RecoverySourceHandler handler = new RecoverySourceHandler(null, target, request, - Math.toIntExact(recoverySettings.getChunkSize().getBytes()), between(1, 10)) { + RecoverySourceHandler handler = new RecoverySourceHandler(null, new AsyncRecoveryTarget(target, recoveryExecutor), threadPool, + request, Math.toIntExact(recoverySettings.getChunkSize().getBytes()), between(1, 10)) { @Override protected void failEngine(IOException cause) { assertFalse(failedEngine.get()); failedEngine.set(true); } }; - try { - handler.sendFiles(store, metas.toArray(new StoreFileMetaData[0]), () -> 0); - fail("exception index"); - } catch (RuntimeException ex) { - final IOException unwrappedCorruption = ExceptionsHelper.unwrapCorruption(ex); - if (throwCorruptedIndexException) { - assertNotNull(unwrappedCorruption); - assertEquals(ex.getMessage(), "[File corruption occurred on recovery but checksums are ok]"); - } else { - assertNull(unwrappedCorruption); - assertEquals(ex.getMessage(), "boom"); - } - } catch (CorruptIndexException ex) { - fail("not expected here"); + PlainActionFuture sendFilesFuture = new PlainActionFuture<>(); + handler.sendFiles(store, metas.toArray(new StoreFileMetaData[0]), () -> 0, sendFilesFuture); + Exception ex = expectThrows(Exception.class, sendFilesFuture::actionGet); + final IOException unwrappedCorruption = ExceptionsHelper.unwrapCorruption(ex); + if (throwCorruptedIndexException) { + assertNotNull(unwrappedCorruption); + assertEquals(ex.getMessage(), "[File corruption occurred on recovery but checksums are ok]"); + } else { + assertNull(unwrappedCorruption); + assertEquals(ex.getMessage(), "boom"); } assertFalse(failedEngine.get()); IOUtils.close(store); @@ -472,6 +447,7 @@ public void testThrowExceptionOnPrimaryRelocatedBeforePhase1Started() throws IOE final RecoverySourceHandler handler = new RecoverySourceHandler( shard, mock(RecoveryTargetHandler.class), + threadPool, request, Math.toIntExact(recoverySettings.getChunkSize().getBytes()), between(1, 8)) { @@ -550,19 +526,13 @@ public void writeFileChunk(StoreFileMetaData md, long position, BytesReference c }; final int maxConcurrentChunks = between(1, 8); final int chunkSize = between(1, 32); - final RecoverySourceHandler handler = new RecoverySourceHandler(shard, recoveryTarget, getStartRecoveryRequest(), + final RecoverySourceHandler handler = new RecoverySourceHandler(shard, recoveryTarget, threadPool, getStartRecoveryRequest(), chunkSize, maxConcurrentChunks); Store store = newStore(createTempDir(), false); List files = generateFiles(store, between(1, 10), () -> between(1, chunkSize * 20)); int totalChunks = files.stream().mapToInt(md -> ((int) md.length() + chunkSize - 1) / chunkSize).sum(); - Thread sender = new Thread(() -> { - try { - handler.sendFiles(store, files.toArray(new StoreFileMetaData[0]), () -> 0); - } catch (Exception ex) { - throw new AssertionError(ex); - } - }); - sender.start(); + 
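/*
 * Editor's note (not part of the patch): the hunks around this point all make the same
 * change, replacing a dedicated sender Thread around the old blocking sendFiles call with
 * the new listener-based signature. A minimal sketch of the waiting pattern, assuming a
 * handler and file list like the ones in these tests:
 *
 *     PlainActionFuture<Void> sendFilesFuture = new PlainActionFuture<>(); // both Future and ActionListener
 *     handler.sendFiles(store, files, () -> 0, sendFilesFuture);           // returns immediately
 *     sendFilesFuture.actionGet();                                         // blocks and rethrows any failure
 *
 * A LatchedActionListener plus a SetOnce is used instead where the test needs to inspect
 * the captured failure rather than have it rethrown.
 */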
PlainActionFuture sendFilesFuture = new PlainActionFuture<>(); + handler.sendFiles(store, files.toArray(new StoreFileMetaData[0]), () -> 0, sendFilesFuture); assertBusy(() -> { assertThat(sentChunks.get(), equalTo(Math.min(totalChunks, maxConcurrentChunks))); assertThat(unrepliedChunks, hasSize(sentChunks.get())); @@ -594,13 +564,11 @@ public void writeFileChunk(StoreFileMetaData md, long position, BytesReference c assertThat(unrepliedChunks, hasSize(expectedUnrepliedChunks)); }); } - sender.join(); + sendFilesFuture.actionGet(); store.close(); } public void testSendFileChunksStopOnError() throws Exception { - final IndexShard shard = mock(IndexShard.class); - when(shard.state()).thenReturn(IndexShardState.STARTED); final List unrepliedChunks = new CopyOnWriteArrayList<>(); final AtomicInteger sentChunks = new AtomicInteger(); final TestRecoveryTargetHandler recoveryTarget = new TestRecoveryTargetHandler() { @@ -616,23 +584,23 @@ public void writeFileChunk(StoreFileMetaData md, long position, BytesReference c }; final int maxConcurrentChunks = between(1, 4); final int chunkSize = between(1, 16); - final RecoverySourceHandler handler = new RecoverySourceHandler(shard, recoveryTarget, getStartRecoveryRequest(), - chunkSize, maxConcurrentChunks); + final RecoverySourceHandler handler = new RecoverySourceHandler(null, new AsyncRecoveryTarget(recoveryTarget, recoveryExecutor), + threadPool, getStartRecoveryRequest(), chunkSize, maxConcurrentChunks); Store store = newStore(createTempDir(), false); List files = generateFiles(store, between(1, 10), () -> between(1, chunkSize * 20)); int totalChunks = files.stream().mapToInt(md -> ((int) md.length() + chunkSize - 1) / chunkSize).sum(); - AtomicReference error = new AtomicReference<>(); - Thread sender = new Thread(() -> { - try { - handler.sendFiles(store, files.toArray(new StoreFileMetaData[0]), () -> 0); - } catch (Exception ex) { - error.set(ex); - } - }); - sender.start(); + SetOnce sendFilesError = new SetOnce<>(); + CountDownLatch sendFilesLatch = new CountDownLatch(1); + handler.sendFiles(store, files.toArray(new StoreFileMetaData[0]), () -> 0, + new LatchedActionListener<>(ActionListener.wrap(r -> sendFilesError.set(null), e -> sendFilesError.set(e)), sendFilesLatch)); assertBusy(() -> assertThat(sentChunks.get(), equalTo(Math.min(totalChunks, maxConcurrentChunks)))); List failedChunks = randomSubsetOf(between(1, unrepliedChunks.size()), unrepliedChunks); - failedChunks.forEach(c -> c.listener.onFailure(new RuntimeException("test chunk exception"))); + CountDownLatch replyLatch = new CountDownLatch(failedChunks.size()); + failedChunks.forEach(c -> { + c.listener.onFailure(new IllegalStateException("test chunk exception")); + replyLatch.countDown(); + }); + replyLatch.await(); unrepliedChunks.removeAll(failedChunks); unrepliedChunks.forEach(c -> { if (randomBoolean()) { @@ -641,12 +609,75 @@ public void writeFileChunk(StoreFileMetaData md, long position, BytesReference c c.listener.onResponse(null); } }); - assertBusy(() -> { - assertThat(error.get(), notNullValue()); - assertThat(error.get().getMessage(), containsString("test chunk exception")); - }); + sendFilesLatch.await(); + assertThat(sendFilesError.get(), instanceOf(IllegalStateException.class)); + assertThat(sendFilesError.get().getMessage(), containsString("test chunk exception")); assertThat("no more chunks should be sent", sentChunks.get(), equalTo(Math.min(totalChunks, maxConcurrentChunks))); - sender.join(); + store.close(); + } + + public void testCancelRecoveryDuringPhase1() 
throws Exception { + Store store = newStore(createTempDir("source"), false); + IndexShard shard = mock(IndexShard.class); + when(shard.store()).thenReturn(store); + Directory dir = store.directory(); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig()); + int numDocs = randomIntBetween(10, 100); + for (int i = 0; i < numDocs; i++) { + Document document = new Document(); + document.add(new StringField("id", Integer.toString(i), Field.Store.YES)); + document.add(newField("field", randomUnicodeOfCodepointLengthBetween(1, 10), TextField.TYPE_STORED)); + writer.addDocument(document); + } + writer.commit(); + writer.close(); + AtomicBoolean wasCancelled = new AtomicBoolean(); + SetOnce cancelRecovery = new SetOnce<>(); + final TestRecoveryTargetHandler recoveryTarget = new TestRecoveryTargetHandler() { + @Override + public void receiveFileInfo(List phase1FileNames, List phase1FileSizes, List phase1ExistingFileNames, + List phase1ExistingFileSizes, int totalTranslogOps, ActionListener listener) { + recoveryExecutor.execute(() -> listener.onResponse(null)); + if (randomBoolean()) { + wasCancelled.set(true); + cancelRecovery.get().run(); + } + } + + @Override + public void writeFileChunk(StoreFileMetaData md, long position, BytesReference content, + boolean lastChunk, int totalTranslogOps, ActionListener listener) { + recoveryExecutor.execute(() -> listener.onResponse(null)); + if (rarely()) { + wasCancelled.set(true); + cancelRecovery.get().run(); + } + } + + @Override + public void cleanFiles(int totalTranslogOps, long globalCheckpoint, Store.MetadataSnapshot sourceMetaData, + ActionListener listener) { + recoveryExecutor.execute(() -> listener.onResponse(null)); + if (randomBoolean()) { + wasCancelled.set(true); + cancelRecovery.get().run(); + } + } + }; + final RecoverySourceHandler handler = new RecoverySourceHandler( + shard, recoveryTarget, threadPool, getStartRecoveryRequest(), between(1, 16), between(1, 4)); + cancelRecovery.set(() -> handler.cancel("test")); + final StepListener phase1Listener = new StepListener<>(); + try { + final CountDownLatch latch = new CountDownLatch(1); + handler.phase1(DirectoryReader.listCommits(dir).get(0), randomNonNegativeLong(), () -> 0, + new LatchedActionListener<>(phase1Listener, latch)); + latch.await(); + phase1Listener.result(); + } catch (Exception e) { + assertTrue(wasCancelled.get()); + assertNotNull(ExceptionsHelper.unwrap(e, CancellableThreads.ExecutionCancelledException.class)); + } store.close(); } @@ -654,7 +685,7 @@ public void testVerifySeqNoStatsWhenRecoverWithSyncId() throws Exception { IndexShard shard = mock(IndexShard.class); when(shard.state()).thenReturn(IndexShardState.STARTED); RecoverySourceHandler handler = new RecoverySourceHandler( - shard, new TestRecoveryTargetHandler(), getStartRecoveryRequest(), between(1, 16), between(1, 4)); + shard, new TestRecoveryTargetHandler(), threadPool, getStartRecoveryRequest(), between(1, 16), between(1, 4)); String syncId = UUIDs.randomBase64UUID(); int numDocs = between(0, 1000); diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/ReplicaToPrimaryPromotionIT.java b/server/src/test/java/org/elasticsearch/indices/recovery/ReplicaToPrimaryPromotionIT.java index 126c4df7928cd..f2ebf5a210812 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/ReplicaToPrimaryPromotionIT.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/ReplicaToPrimaryPromotionIT.java @@ -18,6 +18,7 @@ */ package 
org.elasticsearch.indices.recovery; +import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -31,6 +32,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @ESIntegTestCase.ClusterScope(numDataNodes = 2) @@ -46,10 +48,12 @@ public void testPromoteReplicaToPrimary() throws Exception { createIndex(indexName); final int numOfDocs = scaledRandomIntBetween(0, 200); - try (BackgroundIndexer indexer = new BackgroundIndexer(indexName, "_doc", client(), numOfDocs)) { - waitForDocs(numOfDocs, indexer); + if (numOfDocs > 0) { + try (BackgroundIndexer indexer = new BackgroundIndexer(indexName, "_doc", client(), numOfDocs)) { + waitForDocs(numOfDocs, indexer); + } + refresh(indexName); } - refresh(indexName); assertHitCount(client().prepareSearch(indexName).setSize(0).get(), numOfDocs); ensureGreen(indexName); @@ -57,7 +61,8 @@ public void testPromoteReplicaToPrimary() throws Exception { // sometimes test with a closed index final IndexMetaData.State indexState = randomFrom(IndexMetaData.State.OPEN, IndexMetaData.State.CLOSE); if (indexState == IndexMetaData.State.CLOSE) { - assertAcked(client().admin().indices().prepareClose(indexName)); + CloseIndexResponse closeIndexResponse = client().admin().indices().prepareClose(indexName).get(); + assertThat("close index not acked - " + closeIndexResponse, closeIndexResponse.isAcknowledged(), equalTo(true)); ensureGreen(indexName); } diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java index e77bf5f8d4ae5..549052ba675e5 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java @@ -65,8 +65,7 @@ public void testSerialization() throws Exception { final ByteArrayInputStream inBuffer = new ByteArrayInputStream(outBuffer.toByteArray()); InputStreamStreamInput in = new InputStreamStreamInput(inBuffer); in.setVersion(targetNodeVersion); - final StartRecoveryRequest inRequest = new StartRecoveryRequest(); - inRequest.readFrom(in); + final StartRecoveryRequest inRequest = new StartRecoveryRequest(in); assertThat(outRequest.shardId(), equalTo(inRequest.shardId())); assertThat(outRequest.targetAllocationId(), equalTo(inRequest.targetAllocationId())); diff --git a/server/src/test/java/org/elasticsearch/indices/settings/InternalOrPrivateSettingsPlugin.java b/server/src/test/java/org/elasticsearch/indices/settings/InternalOrPrivateSettingsPlugin.java index 406ed68b51784..6df367d05a572 100644 --- a/server/src/test/java/org/elasticsearch/indices/settings/InternalOrPrivateSettingsPlugin.java +++ b/server/src/test/java/org/elasticsearch/indices/settings/InternalOrPrivateSettingsPlugin.java @@ -23,7 +23,7 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import 
org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.action.support.master.TransportMasterNodeAction; @@ -63,13 +63,13 @@ public List> getSettings() { return Arrays.asList(INDEX_INTERNAL_SETTING, INDEX_PRIVATE_SETTING); } - public static class UpdateInternalOrPrivateAction extends StreamableResponseActionType { + public static class UpdateInternalOrPrivateAction extends ActionType { public static final UpdateInternalOrPrivateAction INSTANCE = new UpdateInternalOrPrivateAction(); private static final String NAME = "indices:admin/settings/update-internal-or-private-index"; public UpdateInternalOrPrivateAction() { - super(NAME); + super(NAME, UpdateInternalOrPrivateAction.Response::new); } public static class Request extends MasterNodeRequest { @@ -78,8 +78,13 @@ public static class Request extends MasterNodeRequest { private String key; private String value; - Request() { + Request() {} + Request(StreamInput in) throws IOException { + super(in); + index = in.readString(); + key = in.readString(); + value = in.readString(); } public Request(final String index, final String key, final String value) { @@ -93,14 +98,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public void readFrom(final StreamInput in) throws IOException { - super.readFrom(in); - index = in.readString(); - key = in.readString(); - value = in.readString(); - } - @Override public void writeTo(final StreamOutput out) throws IOException { super.writeTo(out); @@ -112,15 +109,16 @@ public void writeTo(final StreamOutput out) throws IOException { } static class Response extends ActionResponse { + Response() {} + + Response(StreamInput in) throws IOException { + super(in); + } + @Override public void writeTo(StreamOutput out) throws IOException {} } - @Override - public UpdateInternalOrPrivateAction.Response newResponse() { - return new UpdateInternalOrPrivateAction.Response(); - } - } public static class TransportUpdateInternalOrPrivateAction @@ -139,8 +137,8 @@ public TransportUpdateInternalOrPrivateAction( clusterService, threadPool, actionFilters, - indexNameExpressionResolver, - UpdateInternalOrPrivateAction.Request::new); + UpdateInternalOrPrivateAction.Request::new, + indexNameExpressionResolver); } @Override @@ -149,8 +147,8 @@ protected String executor() { } @Override - protected UpdateInternalOrPrivateAction.Response newResponse() { - return new UpdateInternalOrPrivateAction.Response(); + protected UpdateInternalOrPrivateAction.Response read(StreamInput in) throws IOException { + return new UpdateInternalOrPrivateAction.Response(in); } @Override diff --git a/server/src/test/java/org/elasticsearch/node/NodeTests.java b/server/src/test/java/org/elasticsearch/node/NodeTests.java index 777f4eb13eeea..5e06e557975ea 100644 --- a/server/src/test/java/org/elasticsearch/node/NodeTests.java +++ b/server/src/test/java/org/elasticsearch/node/NodeTests.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.node; +import org.apache.lucene.util.Constants; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.bootstrap.BootstrapContext; @@ -149,6 +150,7 @@ private static Settings.Builder baseSettings() { } public void testCloseOnOutstandingTask() throws Exception { + assumeFalse("https://github.com/elastic/elasticsearch/issues/44256", Constants.WINDOWS); Node node = new MockNode(baseSettings().build(), basePlugins()); node.start(); ThreadPool threadpool = node.injector().getInstance(ThreadPool.class); diff 
--git a/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java b/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java index 11f820ae36cd8..c41caf1523f32 100644 --- a/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java +++ b/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java @@ -64,7 +64,7 @@ public void testNodeInfoStreaming() throws IOException { try (BytesStreamOutput out = new BytesStreamOutput()) { nodeInfo.writeTo(out); try (StreamInput in = out.bytes().streamInput()) { - NodeInfo readNodeInfo = NodeInfo.readNodeInfo(in); + NodeInfo readNodeInfo = new NodeInfo(in); assertExpectedUnchanged(nodeInfo, readNodeInfo); } } diff --git a/server/src/test/java/org/elasticsearch/persistent/CancelPersistentTaskRequestTests.java b/server/src/test/java/org/elasticsearch/persistent/CancelPersistentTaskRequestTests.java index 2ce82c1e79941..90ead35a6062c 100644 --- a/server/src/test/java/org/elasticsearch/persistent/CancelPersistentTaskRequestTests.java +++ b/server/src/test/java/org/elasticsearch/persistent/CancelPersistentTaskRequestTests.java @@ -18,12 +18,13 @@ */ package org.elasticsearch.persistent; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.persistent.RemovePersistentTaskAction.Request; +import org.elasticsearch.test.AbstractWireSerializingTestCase; import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiOfLength; -public class CancelPersistentTaskRequestTests extends AbstractStreamableTestCase { +public class CancelPersistentTaskRequestTests extends AbstractWireSerializingTestCase { @Override protected Request createTestInstance() { @@ -31,7 +32,7 @@ protected Request createTestInstance() { } @Override - protected Request createBlankInstance() { - return new Request(); + protected Writeable.Reader instanceReader() { + return Request::new; } } diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksCustomMetaDataTests.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksCustomMetaDataTests.java index ce1f6a245f6a2..c0bdb8d74aae0 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksCustomMetaDataTests.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksCustomMetaDataTests.java @@ -56,9 +56,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; -import java.util.HashSet; import java.util.Optional; -import java.util.Set; import static org.elasticsearch.cluster.metadata.MetaData.CONTEXT_MODE_GATEWAY; import static org.elasticsearch.cluster.metadata.MetaData.CONTEXT_MODE_SNAPSHOT; @@ -269,11 +267,6 @@ public void testMinVersionSerialization() throws IOException { final BytesStreamOutput out = new BytesStreamOutput(); out.setVersion(streamVersion); - Set features = new HashSet<>(); - if (randomBoolean()) { - features.add("test"); - } - out.setFeatures(features); tasks.build().writeTo(out); final StreamInput input = out.bytes().streamInput(); diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorResponseTests.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorResponseTests.java index 342098f6867f7..ea0d8b2e841ad 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorResponseTests.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorResponseTests.java 
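Editor's note: the persistent-task test changes in this file and the surrounding ones are all the same mechanical migration from the Streamable pattern (a no-arg "blank" instance plus readFrom) to the Writeable pattern (a StreamInput constructor exposed as a Writeable.Reader). A minimal sketch of the resulting test shape, with hypothetical names:

    public class MyRequestTests extends AbstractWireSerializingTestCase<Request> {
        @Override
        protected Request createTestInstance() {
            return new Request(randomAlphaOfLength(10)); // random instance to round-trip
        }

        @Override
        protected Writeable.Reader<Request> instanceReader() {
            return Request::new; // replaces createBlankInstance() + readFrom(StreamInput)
        }
    }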
@@ -20,14 +20,15 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor; +import org.elasticsearch.test.AbstractWireSerializingTestCase; import java.util.Collections; -public class PersistentTasksExecutorResponseTests extends AbstractStreamableTestCase { +public class PersistentTasksExecutorResponseTests extends AbstractWireSerializingTestCase { @Override protected PersistentTaskResponse createTestInstance() { @@ -37,13 +38,13 @@ protected PersistentTaskResponse createTestInstance() { new TestPersistentTasksPlugin.TestParams("test"), randomLong(), PersistentTasksCustomMetaData.INITIAL_ASSIGNMENT)); } else { - return new PersistentTaskResponse(null); + return new PersistentTaskResponse((PersistentTask) null); } } @Override - protected PersistentTaskResponse createBlankInstance() { - return new PersistentTaskResponse(); + protected Writeable.Reader instanceReader() { + return PersistentTaskResponse::new; } @Override diff --git a/server/src/test/java/org/elasticsearch/persistent/RestartPersistentTaskRequestTests.java b/server/src/test/java/org/elasticsearch/persistent/RestartPersistentTaskRequestTests.java index 3ce29d543d4b9..d0bbd4521f49e 100644 --- a/server/src/test/java/org/elasticsearch/persistent/RestartPersistentTaskRequestTests.java +++ b/server/src/test/java/org/elasticsearch/persistent/RestartPersistentTaskRequestTests.java @@ -18,10 +18,11 @@ */ package org.elasticsearch.persistent; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.persistent.CompletionPersistentTaskAction.Request; +import org.elasticsearch.test.AbstractWireSerializingTestCase; -public class RestartPersistentTaskRequestTests extends AbstractStreamableTestCase { +public class RestartPersistentTaskRequestTests extends AbstractWireSerializingTestCase { @Override protected Request createTestInstance() { @@ -29,7 +30,7 @@ protected Request createTestInstance() { } @Override - protected Request createBlankInstance() { - return new Request(); + protected Writeable.Reader instanceReader() { + return Request::new; } } diff --git a/server/src/test/java/org/elasticsearch/persistent/StartPersistentActionRequestTests.java b/server/src/test/java/org/elasticsearch/persistent/StartPersistentActionRequestTests.java index e4c5a26de9c0c..80155256d4f8a 100644 --- a/server/src/test/java/org/elasticsearch/persistent/StartPersistentActionRequestTests.java +++ b/server/src/test/java/org/elasticsearch/persistent/StartPersistentActionRequestTests.java @@ -21,14 +21,15 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.persistent.StartPersistentTaskAction.Request; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestParams; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.test.AbstractWireSerializingTestCase; import java.util.Collections; -public class 
StartPersistentActionRequestTests extends AbstractStreamableTestCase { +public class StartPersistentActionRequestTests extends AbstractWireSerializingTestCase { @Override protected Request createTestInstance() { @@ -43,8 +44,8 @@ protected Request createTestInstance() { } @Override - protected Request createBlankInstance() { - return new Request(); + protected Writeable.Reader instanceReader() { + return Request::new; } @Override diff --git a/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java b/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java index 4cea01e65181a..b92481954bf3d 100644 --- a/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java +++ b/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java @@ -337,7 +337,7 @@ protected void nodeOperation(AllocatedPersistentTask task, TestParams params, Pe assertTrue(awaitBusy(() -> testTask.isCancelled() || testTask.getOperation() != null || clusterService.lifecycleState() != Lifecycle.State.STARTED, // speedup finishing on closed nodes - 30, TimeUnit.SECONDS)); // This can take a while during large cluster restart + 45, TimeUnit.SECONDS)); // This can take a while during large cluster restart if (clusterService.lifecycleState() != Lifecycle.State.STARTED) { return; } @@ -401,12 +401,7 @@ public static class TestTaskAction extends ActionType { public static final String NAME = "cluster:admin/persistent/task_test"; private TestTaskAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return TestTasksResponse::new; + super(NAME, TestTasksResponse::new); } } diff --git a/server/src/test/java/org/elasticsearch/persistent/UpdatePersistentTaskRequestTests.java b/server/src/test/java/org/elasticsearch/persistent/UpdatePersistentTaskRequestTests.java index 5ae54640f8e31..787b5ad5d89fe 100644 --- a/server/src/test/java/org/elasticsearch/persistent/UpdatePersistentTaskRequestTests.java +++ b/server/src/test/java/org/elasticsearch/persistent/UpdatePersistentTaskRequestTests.java @@ -20,14 +20,15 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.persistent.TestPersistentTasksPlugin.State; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor; import org.elasticsearch.persistent.UpdatePersistentTaskStatusAction.Request; +import org.elasticsearch.test.AbstractWireSerializingTestCase; import java.util.Collections; -public class UpdatePersistentTaskRequestTests extends AbstractStreamableTestCase { +public class UpdatePersistentTaskRequestTests extends AbstractWireSerializingTestCase { @Override protected Request createTestInstance() { @@ -35,8 +36,8 @@ protected Request createTestInstance() { } @Override - protected Request createBlankInstance() { - return new Request(); + protected Writeable.Reader instanceReader() { + return Request::new; } @Override diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsActionTests.java new file mode 100644 index 0000000000000..7dfd294e8ae34 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsActionTests.java @@ -0,0 +1,53 @@ +/* + * 
Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.rest.action.admin.cluster; + +import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequest; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.nullValue; + +public class RestReloadSecureSettingsActionTests extends ESTestCase { + + public void testParserWithPassword() throws Exception { + final String request = "{" + + "\"secure_settings_password\": \"secure_settings_password_string\"" + + "}"; + try (XContentParser parser = XContentType.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, request)) { + NodesReloadSecureSettingsRequest reloadSecureSettingsRequest = RestReloadSecureSettingsAction.PARSER.parse(parser, null); + assertEquals("secure_settings_password_string", reloadSecureSettingsRequest.getSecureSettingsPassword().toString()); + } + } + + public void testParserWithoutPassword() throws Exception { + final String request = "{" + + "}"; + try (XContentParser parser = XContentType.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, request)) { + NodesReloadSecureSettingsRequest reloadSecureSettingsRequest = RestReloadSecureSettingsAction.PARSER.parse(parser, null); + assertThat(reloadSecureSettingsRequest.getSecureSettingsPassword(), nullValue()); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/rest/action/document/RestUpdateActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/document/RestUpdateActionTests.java index cea3e9727e275..119057a66d93c 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/document/RestUpdateActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/document/RestUpdateActionTests.java @@ -19,18 +19,31 @@ package org.elasticsearch.rest.action.document; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.VersionType; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestRequest.Method; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.test.rest.RestActionTestCase; import org.junit.Before; +import java.util.HashMap; +import java.util.Map; + +import static 
org.hamcrest.CoreMatchers.containsString; +import static org.mockito.Mockito.mock; + public class RestUpdateActionTests extends RestActionTestCase { + private RestUpdateAction action; + @Before public void setUpAction() { - new RestUpdateAction(Settings.EMPTY, controller()); + action = new RestUpdateAction(Settings.EMPTY, controller()); } public void testTypeInPath() { @@ -47,4 +60,32 @@ public void testTypeInPath() { .build(); dispatchRequest(validRequest); } + + public void testUpdateDocVersion() { + Map params = new HashMap<>(); + if (randomBoolean()) { + params.put("version", Long.toString(randomNonNegativeLong())); + params.put("version_type", randomFrom(VersionType.values()).name()); + } else if (randomBoolean()) { + params.put("version", Long.toString(randomNonNegativeLong())); + } else { + params.put("version_type", randomFrom(VersionType.values()).name()); + } + String content = + "{\n" + + " \"doc\" : {\n" + + " \"name\" : \"new_name\"\n" + + " }\n" + + "}"; + FakeRestRequest updateRequest = new FakeRestRequest.Builder(xContentRegistry()) + .withMethod(RestRequest.Method.POST) + .withPath("test/_update/1") + .withParams(params) + .withContent(new BytesArray(content), XContentType.JSON) + .build(); + ActionRequestValidationException e = expectThrows(ActionRequestValidationException.class, + () -> action.prepareRequest(updateRequest, mock(NodeClient.class))); + assertThat(e.getMessage(), containsString("internal versioning can not be used for optimistic concurrency control. " + + "Please use `if_seq_no` and `if_primary_term` instead")); + } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java index bc4c84282b262..57020901aaeea 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java @@ -108,10 +108,7 @@ public void testGetAppropriateRoundingUsesCorrectIntervals() { assertThat(result, equalTo(2)); } - - @Override - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/39497") - // TODO: When resolving the above AwaitsFix, just delete this override. Method is only overriden to apply the annotation. 
+ public void testReduceRandom() { super.testReduceRandom(); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgAggregatorTests.java index 3e86571ae45e9..b86acbeea046f 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgAggregatorTests.java @@ -19,11 +19,13 @@ package org.elasticsearch.search.aggregations.metrics; +import org.apache.lucene.document.Document; import org.apache.lucene.document.IntPoint; import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.MultiReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.IndexSearcher; @@ -32,19 +34,95 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.NumericUtils; import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.script.MockScriptEngine; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptEngine; +import org.elasticsearch.script.ScriptModule; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.bucket.filter.Filter; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator; import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; +import org.elasticsearch.search.aggregations.support.ValueType; +import org.elasticsearch.search.lookup.LeafDocLookup; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.BiFunction; import java.util.function.Consumer; +import java.util.function.Function; import static java.util.Collections.singleton; +import static org.elasticsearch.index.query.QueryBuilders.termQuery; public class AvgAggregatorTests extends AggregatorTestCase { + /** Script to take a field name in params and sum the values of the field. */ + public static final String SUM_FIELD_PARAMS_SCRIPT = "sum_field_params"; + + /** Script to sum the values of a field named {@code values}. */ + public static final String SUM_VALUES_FIELD_SCRIPT = "sum_values_field"; + + /** Script to return the value of a field named {@code value}. */ + public static final String VALUE_FIELD_SCRIPT = "value_field"; + + /** Script to return the {@code _value} provided by aggs framework. 
*/ + public static final String VALUE_SCRIPT = "_value"; + + @Override + protected ScriptService getMockScriptService() { + Map<String, Function<Map<String, Object>, Object>> scripts = new HashMap<>(); + Function<Map<String, Object>, Integer> getInc = vars -> { + if (vars == null || vars.containsKey("inc") == false) { + return 0; + } else { + return ((Number) vars.get("inc")).intValue(); + } + }; + + BiFunction<Map<String, Object>, String, Object> sum = (vars, fieldname) -> { + int inc = getInc.apply(vars); + LeafDocLookup docLookup = (LeafDocLookup) vars.get("doc"); + List<Long> values = new ArrayList<>(); + for (Object v : docLookup.get(fieldname)) { + values.add(((Number) v).longValue() + inc); + } + return values; + }; + + scripts.put(SUM_FIELD_PARAMS_SCRIPT, vars -> { + String fieldname = (String) vars.get("field"); + return sum.apply(vars, fieldname); + }); + scripts.put(SUM_VALUES_FIELD_SCRIPT, vars -> sum.apply(vars, "values")); + scripts.put(VALUE_FIELD_SCRIPT, vars -> sum.apply(vars, "value")); + scripts.put(VALUE_SCRIPT, vars -> { + int inc = getInc.apply(vars); + return ((Number) vars.get("_value")).doubleValue() + inc; + }); + + MockScriptEngine scriptEngine = new MockScriptEngine(MockScriptEngine.NAME, + scripts, + Collections.emptyMap()); + Map<String, ScriptEngine> engines = Collections.singletonMap(scriptEngine.getType(), scriptEngine); + + return new ScriptService(Settings.EMPTY, engines, ScriptModule.CORE_CONTEXTS); + } + public void testNoDocs() throws IOException { testCase(new MatchAllDocsQuery(), iw -> { // Intentionally not writing any docs @@ -139,28 +217,361 @@ public void testSummationAccuracy() throws IOException { verifyAvgOfDoubles(largeValues, Double.NEGATIVE_INFINITY, 0d); } + public void testUnmappedField() throws IOException { + AvgAggregationBuilder aggregationBuilder = new AvgAggregationBuilder("_name").field("number"); + testCase(aggregationBuilder, new DocValuesFieldExistsQuery("number"), iw -> { + iw.addDocument(singleton(new NumericDocValuesField("number", 7))); + iw.addDocument(singleton(new NumericDocValuesField("number", 1))); + }, avg -> { + assertEquals(Double.NaN, avg.getValue(), 0); + assertFalse(AggregationInspectionHelper.hasValue(avg)); + }, null); + } + + public void testUnmappedWithMissingField() throws IOException { + AvgAggregationBuilder aggregationBuilder = new AvgAggregationBuilder("_name").field("number").missing(0L); + testCase(aggregationBuilder, new DocValuesFieldExistsQuery("number"), iw -> { + iw.addDocument(singleton(new NumericDocValuesField("number", 7))); + iw.addDocument(singleton(new NumericDocValuesField("number", 1))); + }, avg -> { + assertEquals(0.0, avg.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); + }, null); + } + private void verifyAvgOfDoubles(double[] values, double expected, double delta) throws IOException { - testCase(new MatchAllDocsQuery(), + AvgAggregationBuilder aggregationBuilder = new AvgAggregationBuilder("_name").field("number"); + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.DOUBLE); + fieldType.setName("number"); + testCase(aggregationBuilder, new MatchAllDocsQuery(), iw -> { for (double value : values) { iw.addDocument(singleton(new NumericDocValuesField("number", NumericUtils.doubleToSortableLong(value)))); } }, avg -> assertEquals(expected, avg.getValue(), delta), - NumberFieldMapper.NumberType.DOUBLE + fieldType ); } + public void testSingleValuedFieldPartiallyUnmapped() throws IOException { + Directory directory = newDirectory(); + RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); +
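/*
 * Editor's note: "partially unmapped" is simulated below by combining a reader over a
 * populated index with a reader over an empty one via Lucene's MultiReader, so the
 * aggregator sees segments both with and without the field. The three mapped documents
 * (7, 2, 3) should still average to (7 + 2 + 3) / 3 = 4, with a count of 3.
 */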
indexWriter.addDocument(singleton(new NumericDocValuesField("number", 7))); + indexWriter.addDocument(singleton(new NumericDocValuesField("number", 2))); + indexWriter.addDocument(singleton(new NumericDocValuesField("number", 3))); + indexWriter.close(); + + Directory unmappedDirectory = newDirectory(); + RandomIndexWriter unmappedIndexWriter = new RandomIndexWriter(random(), unmappedDirectory); + unmappedIndexWriter.close(); + + IndexReader indexReader = DirectoryReader.open(directory); + IndexReader unamappedIndexReader = DirectoryReader.open(unmappedDirectory); + MultiReader multiReader = new MultiReader(indexReader, unamappedIndexReader); + IndexSearcher indexSearcher = newSearcher(multiReader, true, true); + + + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + fieldType.setName("number"); + AvgAggregationBuilder aggregationBuilder = new AvgAggregationBuilder("_name").field("number"); + + AvgAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + aggregator.preCollection(); + indexSearcher.search(new MatchAllDocsQuery(), aggregator); + aggregator.postCollection(); + + InternalAvg avg = (InternalAvg) aggregator.buildAggregation(0L); + + assertEquals(4, avg.getValue(), 0); + assertEquals(3, avg.getCount(), 0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); + + multiReader.close(); + directory.close(); + unmappedDirectory.close(); + } + + public void testSingleValuedField() throws IOException { + testCase(new MatchAllDocsQuery(), iw -> { + iw.addDocument(singleton(new NumericDocValuesField("number", 7))); + iw.addDocument(singleton(new NumericDocValuesField("number", 2))); + iw.addDocument(singleton(new NumericDocValuesField("number", 3))); + }, avg -> { + assertEquals(4, avg.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); + assertEquals(4.0, avg.getProperty("value")); + }); + } + + public void testSingleValuedField_WithFormatter() throws IOException { + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + fieldType.setName("value"); + + AvgAggregationBuilder aggregationBuilder = new AvgAggregationBuilder("_name") + .format("#") + .field("value") + .script(new Script(ScriptType.INLINE, MockScriptEngine.NAME, VALUE_SCRIPT, Collections.emptyMap())); + + testCase(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + final int numDocs = 10; + for (int i = 0; i < numDocs; i++) { + iw.addDocument(singleton(new NumericDocValuesField("value", i + 1))); + } + }, avg -> { + assertEquals((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10, avg.getValue(),0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); + assertEquals("6", avg.getValueAsString()); + }, fieldType); + } + + public void testSingleValuedFieldWithValueScript() throws IOException { + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + fieldType.setName("value"); + + AvgAggregationBuilder aggregationBuilder = new AvgAggregationBuilder("_name") + .field("value") + .script(new Script(ScriptType.INLINE, MockScriptEngine.NAME, VALUE_SCRIPT, Collections.emptyMap())); + + testCase(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + final int numDocs = 10; + for (int i = 0; i < numDocs; i++) { + iw.addDocument(singleton(new NumericDocValuesField("value", i + 1))); + } + }, avg -> { + assertEquals((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10, avg.getValue(), 0); + 
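/*
 * Editor's note: with no "inc" parameter the mock value script is an identity transform,
 * so the expected average is (1 + 2 + ... + 10) / 10 = 5.5, which the "#" format then
 * renders as the rounded string "6" checked below.
 */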
assertTrue(AggregationInspectionHelper.hasValue(avg)); + }, fieldType); + } + + public void testScriptSingleValued() throws IOException { + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + fieldType.setName("value"); + + AvgAggregationBuilder aggregationBuilder = new AvgAggregationBuilder("_name") + .script(new Script(ScriptType.INLINE, MockScriptEngine.NAME, VALUE_FIELD_SCRIPT, Collections.emptyMap())); + + testCase(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + final int numDocs = 10; + for (int i = 0; i < numDocs; i++) { + iw.addDocument(singleton(new NumericDocValuesField("value", i + 1))); + } + }, avg -> { + assertEquals((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10, avg.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); + }, fieldType); + } + + public void testScriptSingleValuedWithParams() throws IOException { + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + fieldType.setName("value"); + + Map params = new HashMap<>(); + params.put("inc", 1); + params.put("field", "value"); + + AvgAggregationBuilder aggregationBuilder = new AvgAggregationBuilder("_name") + .script(new Script(ScriptType.INLINE, MockScriptEngine.NAME, SUM_FIELD_PARAMS_SCRIPT, params)); + + testCase(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + final int numDocs = 10; + for (int i = 0; i < numDocs; i++) { + iw.addDocument(singleton(new NumericDocValuesField("value", i + 1))); + } + }, avg -> { + assertEquals((double) (2+3+4+5+6+7+8+9+10+11) / 10, avg.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); + }, fieldType); + } + + public void testMultiValuedField() throws IOException { + testCase(new MatchAllDocsQuery(), iw -> { + final int numDocs = 10; + for (int i = 0; i < numDocs; i++) { + Document document = new Document(); + document.add(new SortedNumericDocValuesField("number", i + 2)); + document.add(new SortedNumericDocValuesField("number", i + 3)); + iw.addDocument(document); + } + }, avg -> { + assertEquals((2+3+3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12) / 20, avg.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); + }); + } + + public void testScriptMultiValued() throws IOException { + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + fieldType.setName("values"); + + AvgAggregationBuilder aggregationBuilder = new AvgAggregationBuilder("_name") + .script(new Script(ScriptType.INLINE, MockScriptEngine.NAME, SUM_VALUES_FIELD_SCRIPT, Collections.emptyMap())); + + testCase(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + final int numDocs = 10; + for (int i = 0; i < numDocs; i++) { + Document document = new Document(); + document.add(new SortedNumericDocValuesField("values", i + 2)); + document.add(new SortedNumericDocValuesField("values", i + 3)); + iw.addDocument(document); + } + }, avg -> { + assertEquals((double) (2+3+3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12) / 20, avg.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); + }, fieldType); + } + + public void testScriptMultiValuedWithParams() throws Exception { + Map params = new HashMap<>(); + params.put("inc", 1); + params.put("field", "values"); + + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + fieldType.setName("values"); + + AvgAggregationBuilder aggregationBuilder = new AvgAggregationBuilder("_name") + .script(new 
Script(ScriptType.INLINE, MockScriptEngine.NAME, SUM_FIELD_PARAMS_SCRIPT, params)); + + testCase(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + final int numDocs = 10; + for (int i = 0; i < numDocs; i++) { + Document document = new Document(); + document.add(new SortedNumericDocValuesField("values", i + 2)); + document.add(new SortedNumericDocValuesField("values", i + 3)); + iw.addDocument(document); + } + }, avg -> { + assertEquals((double) (3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12+12+13) / 20, avg.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); + }, fieldType); + } + + public void testSingleValuedFieldWithValueScriptWithParams() throws IOException { + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + fieldType.setName("value"); + + Map params = Collections.singletonMap("inc", 1); + AvgAggregationBuilder aggregationBuilder = new AvgAggregationBuilder("_name") + .field("value") + .script(new Script(ScriptType.INLINE, MockScriptEngine.NAME, VALUE_SCRIPT, params)); + + testCase(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + final int numDocs = 10; + for (int i = 0; i < numDocs; i++) { + iw.addDocument(singleton(new NumericDocValuesField("value", i + 1))); + } + }, avg -> { + assertEquals((double) (2+3+4+5+6+7+8+9+10+11) / 10, avg.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); + }, fieldType); + } + + public void testMultiValuedFieldWithValueScriptWithParams() throws IOException { + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + fieldType.setName("values"); + + Map params = Collections.singletonMap("inc", 1); + AvgAggregationBuilder aggregationBuilder = new AvgAggregationBuilder("_name") + .field("values") + .script(new Script(ScriptType.INLINE, MockScriptEngine.NAME, VALUE_SCRIPT, params)); + + testCase(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + final int numDocs = 10; + for (int i = 0; i < numDocs; i++) { + Document document = new Document(); + document.add(new SortedNumericDocValuesField("values", i + 2)); + document.add(new SortedNumericDocValuesField("values", i + 3)); + iw.addDocument(document); } + }, avg -> { + assertEquals((double) (3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12+12+13) / 20, avg.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); + }, fieldType); + } + + public void testMultiValuedFieldWithValueScript() throws IOException { + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + fieldType.setName("values"); + + AvgAggregationBuilder aggregationBuilder = new AvgAggregationBuilder("_name") + .field("values") + .script(new Script(ScriptType.INLINE, MockScriptEngine.NAME, VALUE_SCRIPT, Collections.emptyMap())); + + testCase(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + final int numDocs = 10; + for (int i = 0; i < numDocs; i++) { + Document document = new Document(); + document.add(new SortedNumericDocValuesField("values", i + 2)); + document.add(new SortedNumericDocValuesField("values", i + 3)); + iw.addDocument(document); } + }, avg -> { + assertEquals((double) (2+3+3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12) / 20, avg.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); + }, fieldType); + } + + public void testOrderByEmptyAggregation() throws IOException { + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + 
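/*
 * Editor's note: here the value script receives inc = 1, so each stored value i + 1 is read
 * back as i + 2 and the expected average becomes (2 + 3 + ... + 11) / 10 = 6.5, matching
 * the assertion below.
 */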
fieldType.setName("value"); + fieldType.setHasDocValues(true); + + AggregationBuilder aggregationBuilder = new TermsAggregationBuilder("terms", ValueType.NUMERIC) + .field("value") + .order(BucketOrder.compound(BucketOrder.aggregation("filter>avg", true))) + .subAggregation(AggregationBuilders.filter("filter", termQuery("value", 100)) + .subAggregation(AggregationBuilders.avg("avg").field("value"))); + + Directory directory = newDirectory(); + RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); + final int numDocs = 10; + for (int i = 0; i < numDocs; i++) { + indexWriter.addDocument(singleton(new NumericDocValuesField("value", i + 1))); + } + indexWriter.close(); + + IndexReader indexReader = DirectoryReader.open(directory); + IndexSearcher indexSearcher = newSearcher(indexReader, true, true); + + TermsAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + aggregator.preCollection(); + indexSearcher.search(new MatchAllDocsQuery(), aggregator); + aggregator.postCollection(); + + Terms terms = (Terms) aggregator.buildAggregation(0L); + assertNotNull(terms); + List buckets = terms.getBuckets(); + assertNotNull(buckets); + assertEquals(10, buckets.size()); + + for (int i = 0; i < 10; i++) { + Terms.Bucket bucket = buckets.get(i); + assertNotNull(bucket); + assertEquals((long) i + 1, bucket.getKeyAsNumber()); + assertEquals(1L, bucket.getDocCount()); + + Filter filter = bucket.getAggregations().get("filter"); + assertNotNull(filter); + assertEquals(0L, filter.getDocCount()); + + Avg avg = filter.getAggregations().get("avg"); + assertNotNull(avg); + assertEquals(Double.NaN, avg.getValue(), 0); + } + + indexReader.close(); + directory.close(); + } + private void testCase(Query query, CheckedConsumer buildIndex, Consumer verify) throws IOException { - testCase(query, buildIndex, verify, NumberFieldMapper.NumberType.LONG); + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + fieldType.setName("number"); + AvgAggregationBuilder aggregationBuilder = new AvgAggregationBuilder("_name").field("number"); + testCase(aggregationBuilder, query, buildIndex, verify, fieldType); } - private void testCase(Query query, + private void testCase(AvgAggregationBuilder aggregationBuilder, Query query, CheckedConsumer buildIndex, - Consumer verify, - NumberFieldMapper.NumberType fieldNumberType) throws IOException { + Consumer verify, MappedFieldType fieldType) throws IOException { Directory directory = newDirectory(); RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); buildIndex.accept(indexWriter); @@ -169,10 +580,6 @@ private void testCase(Query query, IndexReader indexReader = DirectoryReader.open(directory); IndexSearcher indexSearcher = newSearcher(indexReader, true, true); - AvgAggregationBuilder aggregationBuilder = new AvgAggregationBuilder("_name").field("number"); - MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(fieldNumberType); - fieldType.setName("number"); - AvgAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); aggregator.preCollection(); indexSearcher.search(query, aggregator); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java index d8696d461e499..e6652c74c8bea 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java +++ 
b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java @@ -18,334 +18,30 @@ */ package org.elasticsearch.search.aggregations.metrics; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; -import org.elasticsearch.search.aggregations.BucketOrder; -import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.bucket.filter.Filter; -import org.elasticsearch.search.aggregations.bucket.global.Global; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; -import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.Collection; +import java.util.Collections; -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.search.aggregations.AggregationBuilders.avg; -import static org.elasticsearch.search.aggregations.AggregationBuilders.filter; -import static org.elasticsearch.search.aggregations.AggregationBuilders.global; -import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; -import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.search.aggregations.metrics.MetricAggScriptPlugin.METRIC_SCRIPT_ENGINE; import static org.elasticsearch.search.aggregations.metrics.MetricAggScriptPlugin.VALUE_FIELD_SCRIPT; -import static org.elasticsearch.search.aggregations.metrics.MetricAggScriptPlugin.SUM_FIELD_PARAMS_SCRIPT; -import static org.elasticsearch.search.aggregations.metrics.MetricAggScriptPlugin.SUM_VALUES_FIELD_SCRIPT; -import static org.elasticsearch.search.aggregations.metrics.MetricAggScriptPlugin.VALUE_SCRIPT; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; -public class AvgIT extends AbstractNumericTestCase { +public class AvgIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { return Collections.singleton(MetricAggScriptPlugin.class); } - @Override - public void testEmptyAggregation() throws Exception { - - SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx") - .setQuery(matchAllQuery()) - .addAggregation(histogram("histo").field("value").interval(1L).minDocCount(0).subAggregation(avg("avg").field("value"))) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - Histogram histo = searchResponse.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - Histogram.Bucket bucket = histo.getBuckets().get(1); - assertThat(bucket, notNullValue()); - - Avg avg = bucket.getAggregations().get("avg"); - assertThat(avg, notNullValue()); - assertThat(avg.getName(), equalTo("avg")); - assertThat(Double.isNaN(avg.getValue()), is(true)); - } - - @Override - public void testUnmapped() throws Exception { - 
SearchResponse searchResponse = client().prepareSearch("idx_unmapped") - .setQuery(matchAllQuery()) - .addAggregation(avg("avg").field("value")) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); - - Avg avg = searchResponse.getAggregations().get("avg"); - assertThat(avg, notNullValue()); - assertThat(avg.getName(), equalTo("avg")); - assertThat(avg.getValue(), equalTo(Double.NaN)); - } - - @Override - public void testSingleValuedField() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(avg("avg").field("value")) - .get(); - - assertHitCount(searchResponse, 10); - - Avg avg = searchResponse.getAggregations().get("avg"); - assertThat(avg, notNullValue()); - assertThat(avg.getName(), equalTo("avg")); - assertThat(avg.getValue(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10)); - } - - @Override - public void testSingleValuedFieldGetProperty() throws Exception { - - SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(global("global").subAggregation(avg("avg").field("value"))).get(); - - assertHitCount(searchResponse, 10); - - Global global = searchResponse.getAggregations().get("global"); - assertThat(global, notNullValue()); - assertThat(global.getName(), equalTo("global")); - assertThat(global.getDocCount(), equalTo(10L)); - assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().size(), equalTo(1)); - - Avg avg = global.getAggregations().get("avg"); - assertThat(avg, notNullValue()); - assertThat(avg.getName(), equalTo("avg")); - double expectedAvgValue = (double) (1+2+3+4+5+6+7+8+9+10) / 10; - assertThat(avg.getValue(), equalTo(expectedAvgValue)); - assertThat((Avg) ((InternalAggregation)global).getProperty("avg"), equalTo(avg)); - assertThat((double) ((InternalAggregation)global).getProperty("avg.value"), equalTo(expectedAvgValue)); - assertThat((double) ((InternalAggregation)avg).getProperty("value"), equalTo(expectedAvgValue)); - } - - @Override - public void testSingleValuedFieldPartiallyUnmapped() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx", "idx_unmapped") - .setQuery(matchAllQuery()) - .addAggregation(avg("avg").field("value")) - .get(); - - assertHitCount(searchResponse, 10); - - Avg avg = searchResponse.getAggregations().get("avg"); - assertThat(avg, notNullValue()); - assertThat(avg.getName(), equalTo("avg")); - assertThat(avg.getValue(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10)); - } - - @Override - public void testSingleValuedFieldWithValueScript() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(avg("avg").field("value") - .script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, VALUE_SCRIPT, Collections.emptyMap()))) - .get(); - - assertHitCount(searchResponse, 10); - - Avg avg = searchResponse.getAggregations().get("avg"); - assertThat(avg, notNullValue()); - assertThat(avg.getName(), equalTo("avg")); - assertThat(avg.getValue(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10)); - } - - @Override - public void testSingleValuedFieldWithValueScriptWithParams() throws Exception { - Map params = Collections.singletonMap("inc", 1); - SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(avg("avg").field("value") - .script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, VALUE_SCRIPT, 
params))) - .get(); - - assertHitCount(searchResponse, 10); - - Avg avg = searchResponse.getAggregations().get("avg"); - assertThat(avg, notNullValue()); - assertThat(avg.getName(), equalTo("avg")); - assertThat(avg.getValue(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10)); - } - - public void testSingleValuedField_WithFormatter() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(avg("avg").format("#").field("value")).get(); - - assertHitCount(searchResponse, 10); - - Avg avg = searchResponse.getAggregations().get("avg"); - assertThat(avg, notNullValue()); - assertThat(avg.getName(), equalTo("avg")); - assertThat(avg.getValue(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); - assertThat(avg.getValueAsString(), equalTo("6")); - } - - @Override - public void testMultiValuedField() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(avg("avg").field("values")) - .get(); - - assertHitCount(searchResponse, 10); - - Avg avg = searchResponse.getAggregations().get("avg"); - assertThat(avg, notNullValue()); - assertThat(avg.getName(), equalTo("avg")); - assertThat(avg.getValue(), equalTo((double) (2+3+3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12) / 20)); - } - - @Override - public void testMultiValuedFieldWithValueScript() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(avg("avg").field("values") - .script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, VALUE_SCRIPT, Collections.emptyMap()))) - .get(); - - assertHitCount(searchResponse, 10); - - Avg avg = searchResponse.getAggregations().get("avg"); - assertThat(avg, notNullValue()); - assertThat(avg.getName(), equalTo("avg")); - assertThat(avg.getValue(), equalTo((double) (2+3+3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12) / 20)); - } - - @Override - public void testMultiValuedFieldWithValueScriptWithParams() throws Exception { - Map params = Collections.singletonMap("inc", 1); - SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(avg("avg").field("values") - .script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, VALUE_SCRIPT, params))) - .get(); - - assertHitCount(searchResponse, 10); - - Avg avg = searchResponse.getAggregations().get("avg"); - assertThat(avg, notNullValue()); - assertThat(avg.getName(), equalTo("avg")); - assertThat(avg.getValue(), equalTo((double) (3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12+12+13) / 20)); - } - - @Override - public void testScriptSingleValued() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(avg("avg") - .script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, VALUE_FIELD_SCRIPT, Collections.emptyMap()))) - .get(); - - assertHitCount(searchResponse, 10); - - Avg avg = searchResponse.getAggregations().get("avg"); - assertThat(avg, notNullValue()); - assertThat(avg.getName(), equalTo("avg")); - assertThat(avg.getValue(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10)); - } - - @Override - public void testScriptSingleValuedWithParams() throws Exception { - Map params = new HashMap<>(); - params.put("inc", 1); - params.put("field", "value"); - SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(avg("avg") - .script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, 
SUM_FIELD_PARAMS_SCRIPT, params))) - .get(); - - assertHitCount(searchResponse, 10); - - Avg avg = searchResponse.getAggregations().get("avg"); - assertThat(avg, notNullValue()); - assertThat(avg.getName(), equalTo("avg")); - assertThat(avg.getValue(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10)); - } - - @Override - public void testScriptMultiValued() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(avg("avg") - .script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, SUM_VALUES_FIELD_SCRIPT, Collections.emptyMap()))) - .get(); - - assertHitCount(searchResponse, 10); - - Avg avg = searchResponse.getAggregations().get("avg"); - assertThat(avg, notNullValue()); - assertThat(avg.getName(), equalTo("avg")); - assertThat(avg.getValue(), equalTo((double) (2+3+3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12) / 20)); - } - - @Override - public void testScriptMultiValuedWithParams() throws Exception { - Map params = new HashMap<>(); - params.put("inc", 1); - params.put("field", "values"); - SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(avg("avg") - .script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, SUM_FIELD_PARAMS_SCRIPT, params))) - .get(); - - assertHitCount(searchResponse, 10); - - Avg avg = searchResponse.getAggregations().get("avg"); - assertThat(avg, notNullValue()); - assertThat(avg.getName(), equalTo("avg")); - assertThat(avg.getValue(), equalTo((double) (3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12+12+13) / 20)); - } - - @Override - public void testOrderByEmptyAggregation() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(terms("terms").field("value").order(BucketOrder.compound(BucketOrder.aggregation("filter>avg", true))) - .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(avg("avg").field("value")))) - .get(); - - assertHitCount(searchResponse, 10); - - Terms terms = searchResponse.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - List buckets = terms.getBuckets(); - assertThat(buckets, notNullValue()); - assertThat(buckets.size(), equalTo(10)); - - for (int i = 0; i < 10; i++) { - Terms.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); - assertThat(bucket.getDocCount(), equalTo(1L)); - Filter filter = bucket.getAggregations().get("filter"); - assertThat(filter, notNullValue()); - assertThat(filter.getDocCount(), equalTo(0L)); - Avg avg = filter.getAggregations().get("avg"); - assertThat(avg, notNullValue()); - assertThat(avg.value(), equalTo(Double.NaN)); - - } - } - /** * Make sure that a request using a script does not get cached and a request * not using a script does get cached. 
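The coverage removed from AvgIT above is superseded by the unit-level AvgAggregatorTests harness shown earlier in this diff, which drives the aggregator against a Lucene index directly instead of a full cluster. A rough sketch of that lifecycle, assuming an aggregationBuilder and fieldType prepared as in the AvgAggregatorTests hunk (createAggregator and newSearcher are inherited from AggregatorTestCase):

    Directory directory = newDirectory();
    RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
    indexWriter.addDocument(singleton(new NumericDocValuesField("value", 1)));
    indexWriter.close();

    IndexReader indexReader = DirectoryReader.open(directory);
    IndexSearcher indexSearcher = newSearcher(indexReader, true, true);

    AvgAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType);
    aggregator.preCollection();                        // set up collection state before searching
    indexSearcher.search(new MatchAllDocsQuery(), aggregator);
    aggregator.postCollection();                       // finish any pending collection work
    Avg avg = (Avg) aggregator.buildAggregation(0L);   // materialize the result for ordinal 0
    assertEquals(1.0, avg.getValue(), 0);

    indexReader.close();
    directory.close();

Because no nodes are started, the same assertions run without any cluster setup.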
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java index 3677da5df9b23..88aea1532f3da 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.MockScriptPlugin; @@ -117,6 +118,11 @@ protected Map, Object>> pluginScripts() { return value0 + value1 + value2; }); + scripts.put("single_input", vars -> { + double value = (double) vars.get("_value"); + return value; + }); + scripts.put("return null", vars -> null); return scripts; @@ -628,4 +634,159 @@ public void testPartiallyUnmapped() throws Exception { } } } + + public void testSingleBucketPathAgg() throws Exception { + XContentBuilder content = XContentFactory.jsonBuilder() + .startObject() + .field("buckets_path", "field2Sum") + .startObject("script") + .field("source", "single_input") + .field("lang", CustomScriptPlugin.NAME) + .endObject() + .endObject(); + BucketScriptPipelineAggregationBuilder bucketScriptAgg = + BucketScriptPipelineAggregationBuilder.parse("seriesArithmetic", createParser(content)); + + SearchResponse response = client() + .prepareSearch("idx", "idx_unmapped") + .addAggregation( + histogram("histo") + .field(FIELD_1_NAME) + .interval(interval) + .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) + .subAggregation(bucketScriptAgg)).get(); + + assertSearchResponse(response); + + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + + for (int i = 0; i < buckets.size(); ++i) { + Histogram.Bucket bucket = buckets.get(i); + if (bucket.getDocCount() == 0) { + SimpleValue seriesArithmetic = bucket.getAggregations().get("seriesArithmetic"); + assertThat(seriesArithmetic, nullValue()); + } else { + Sum field2Sum = bucket.getAggregations().get("field2Sum"); + assertThat(field2Sum, notNullValue()); + double field2SumValue = field2Sum.getValue(); + SimpleValue seriesArithmetic = bucket.getAggregations().get("seriesArithmetic"); + assertThat(seriesArithmetic, notNullValue()); + double seriesArithmeticValue = seriesArithmetic.value(); + assertThat(seriesArithmeticValue, equalTo(field2SumValue)); + } + } + } + + public void testArrayBucketPathAgg() throws Exception { + XContentBuilder content = XContentFactory.jsonBuilder() + .startObject() + .array("buckets_path", "field2Sum", "field3Sum", "field4Sum") + .startObject("script") + .field("source", "_value0 + _value1 + _value2") + .field("lang", CustomScriptPlugin.NAME) + .endObject() + .endObject(); + BucketScriptPipelineAggregationBuilder bucketScriptAgg = + BucketScriptPipelineAggregationBuilder.parse("seriesArithmetic", createParser(content)); + + SearchResponse response = client() + .prepareSearch("idx", "idx_unmapped") + .addAggregation( + histogram("histo") + .field(FIELD_1_NAME) + .interval(interval) + .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) + .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) + 
.subAggregation(sum("field4Sum").field(FIELD_4_NAME)) + .subAggregation(bucketScriptAgg)).get(); + + assertSearchResponse(response); + + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + + for (int i = 0; i < buckets.size(); ++i) { + Histogram.Bucket bucket = buckets.get(i); + if (bucket.getDocCount() == 0) { + SimpleValue seriesArithmetic = bucket.getAggregations().get("seriesArithmetic"); + assertThat(seriesArithmetic, nullValue()); + } else { + Sum field2Sum = bucket.getAggregations().get("field2Sum"); + assertThat(field2Sum, notNullValue()); + double field2SumValue = field2Sum.getValue(); + Sum field3Sum = bucket.getAggregations().get("field3Sum"); + assertThat(field3Sum, notNullValue()); + double field3SumValue = field3Sum.getValue(); + Sum field4Sum = bucket.getAggregations().get("field4Sum"); + assertThat(field4Sum, notNullValue()); + double field4SumValue = field4Sum.getValue(); + SimpleValue seriesArithmetic = bucket.getAggregations().get("seriesArithmetic"); + assertThat(seriesArithmetic, notNullValue()); + double seriesArithmeticValue = seriesArithmetic.value(); + assertThat(seriesArithmeticValue, equalTo(field2SumValue + field3SumValue + field4SumValue)); + } + } + } + + public void testObjectBucketPathAgg() throws Exception { + XContentBuilder content = XContentFactory.jsonBuilder() + .startObject() + .startObject("buckets_path") + .field("_value0", "field2Sum") + .field("_value1", "field3Sum") + .field("_value2", "field4Sum") + .endObject() + .startObject("script") + .field("source", "_value0 + _value1 + _value2") + .field("lang", CustomScriptPlugin.NAME) + .endObject() + .endObject(); + BucketScriptPipelineAggregationBuilder bucketScriptAgg = + BucketScriptPipelineAggregationBuilder.parse("seriesArithmetic", createParser(content)); + + SearchResponse response = client() + .prepareSearch("idx", "idx_unmapped") + .addAggregation( + histogram("histo") + .field(FIELD_1_NAME) + .interval(interval) + .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) + .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) + .subAggregation(sum("field4Sum").field(FIELD_4_NAME)) + .subAggregation(bucketScriptAgg)).get(); + + assertSearchResponse(response); + + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + + for (int i = 0; i < buckets.size(); ++i) { + Histogram.Bucket bucket = buckets.get(i); + if (bucket.getDocCount() == 0) { + SimpleValue seriesArithmetic = bucket.getAggregations().get("seriesArithmetic"); + assertThat(seriesArithmetic, nullValue()); + } else { + Sum field2Sum = bucket.getAggregations().get("field2Sum"); + assertThat(field2Sum, notNullValue()); + double field2SumValue = field2Sum.getValue(); + Sum field3Sum = bucket.getAggregations().get("field3Sum"); + assertThat(field3Sum, notNullValue()); + double field3SumValue = field3Sum.getValue(); + Sum field4Sum = bucket.getAggregations().get("field4Sum"); + assertThat(field4Sum, notNullValue()); + double field4SumValue = field4Sum.getValue(); + SimpleValue seriesArithmetic = bucket.getAggregations().get("seriesArithmetic"); + assertThat(seriesArithmetic, notNullValue()); + double seriesArithmeticValue = seriesArithmetic.value(); + assertThat(seriesArithmeticValue, equalTo(field2SumValue + field3SumValue + field4SumValue)); + } + } + } } diff --git 
a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptTests.java index 20684b6383f1f..0746fa1782f59 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptTests.java @@ -19,11 +19,14 @@ package org.elasticsearch.search.aggregations.pipeline; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.BasePipelineAggregationTestCase; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; +import java.io.IOException; import java.util.HashMap; import java.util.Map; @@ -59,4 +62,47 @@ protected BucketScriptPipelineAggregationBuilder createTestAggregatorFactory() { return factory; } + public void testParseBucketPath() throws IOException { + XContentBuilder content = XContentFactory.jsonBuilder() + .startObject() + .field("buckets_path", "_count") + .startObject("script") + .field("source", "value") + .field("lang", "expression") + .endObject() + .endObject(); + BucketScriptPipelineAggregationBuilder builder1 = BucketScriptPipelineAggregationBuilder.parse("count", createParser(content)); + assertEquals(builder1.getBucketsPaths().length , 1); + assertEquals(builder1.getBucketsPaths()[0], "_count"); + + content = XContentFactory.jsonBuilder() + .startObject() + .startObject("buckets_path") + .field("path1", "_count1") + .field("path2", "_count2") + .endObject() + .startObject("script") + .field("source", "value") + .field("lang", "expression") + .endObject() + .endObject(); + BucketScriptPipelineAggregationBuilder builder2 = BucketScriptPipelineAggregationBuilder.parse("count", createParser(content)); + assertEquals(builder2.getBucketsPaths().length , 2); + assertEquals(builder2.getBucketsPaths()[0], "_count1"); + assertEquals(builder2.getBucketsPaths()[1], "_count2"); + + content = XContentFactory.jsonBuilder() + .startObject() + .array("buckets_path","_count1", "_count2") + .startObject("script") + .field("source", "value") + .field("lang", "expression") + .endObject() + .endObject(); + BucketScriptPipelineAggregationBuilder builder3 = BucketScriptPipelineAggregationBuilder.parse("count", createParser(content)); + assertEquals(builder3.getBucketsPaths().length , 2); + assertEquals(builder3.getBucketsPaths()[0], "_count1"); + assertEquals(builder3.getBucketsPaths()[1], "_count2"); + } + } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/GapPolicyTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/GapPolicyTests.java new file mode 100644 index 0000000000000..be02b0f1d9683 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/GapPolicyTests.java @@ -0,0 +1,61 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.pipeline; + +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.io.stream.AbstractWriteableEnumTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; + +public class GapPolicyTests extends AbstractWriteableEnumTestCase { + + public GapPolicyTests() { + super(BucketHelpers.GapPolicy::readFrom); + } + + @Override + public void testValidOrdinals() { + assertThat(BucketHelpers.GapPolicy.INSERT_ZEROS.ordinal(), equalTo(0)); + assertThat(BucketHelpers.GapPolicy.SKIP.ordinal(), equalTo(1)); + } + + @Override + public void testFromString() { + assertThat(BucketHelpers.GapPolicy.parse("insert_zeros", null), equalTo(BucketHelpers.GapPolicy.INSERT_ZEROS)); + assertThat(BucketHelpers.GapPolicy.parse("skip", null), equalTo(BucketHelpers.GapPolicy.SKIP)); + ParsingException e = expectThrows(ParsingException.class, () -> BucketHelpers.GapPolicy.parse("does_not_exist", null)); + assertThat(e.getMessage(), + equalTo("Invalid gap policy: [does_not_exist], accepted values: [insert_zeros, skip]")); + } + + @Override + public void testReadFrom() throws IOException { + assertReadFromStream(0, BucketHelpers.GapPolicy.INSERT_ZEROS); + assertReadFromStream(1, BucketHelpers.GapPolicy.SKIP); + } + + @Override + public void testWriteTo() throws IOException { + assertWriteToStream(BucketHelpers.GapPolicy.INSERT_ZEROS, 0); + assertWriteToStream(BucketHelpers.GapPolicy.SKIP, 1); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java b/server/src/test/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java index 152a57a6c1387..03e856ba8b68b 100644 --- a/server/src/test/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java +++ b/server/src/test/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java @@ -86,7 +86,7 @@ public void testFailedSearchWithWrongQuery() throws Exception { ClusterHealthResponse clusterHealth = client() .admin() .cluster() - .health(clusterHealthRequest("test").waitForYellowStatus().waitForNoRelocatingShards(true) + .health(clusterHealthRequest("test").waitForYellowStatus().waitForNoRelocatingShards(true).waitForEvents(Priority.LANGUID) .waitForActiveShards(test.totalNumShards)).actionGet(); logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); diff --git a/server/src/test/java/org/elasticsearch/search/collapse/CollapseBuilderTests.java b/server/src/test/java/org/elasticsearch/search/collapse/CollapseBuilderTests.java index 5142f0c9b73f5..24c69cb23916b 100644 --- a/server/src/test/java/org/elasticsearch/search/collapse/CollapseBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/collapse/CollapseBuilderTests.java @@ -118,7 +118,7 @@ protected CollapseBuilder mutateInstance(CollapseBuilder instance) throws IOExce case 2: default: newBuilder = copyInstance(instance); - List innerHits = newBuilder.getInnerHits(); + List innerHits = new ArrayList<>(newBuilder.getInnerHits()); for (int i = 0; 
i < between(1, 5); i++) { innerHits.add(InnerHitBuilderTests.randomInnerHits()); } diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightFieldTests.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightFieldTests.java index 7044a7b103098..4e77d35a4c5ec 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightFieldTests.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightFieldTests.java @@ -114,7 +114,7 @@ public void testSerialization() throws IOException { try (BytesStreamOutput output = new BytesStreamOutput()) { testField.writeTo(output); try (StreamInput in = output.bytes().streamInput()) { - HighlightField deserializedCopy = HighlightField.readHighlightField(in); + HighlightField deserializedCopy = new HighlightField(in); assertEquals(testField, deserializedCopy); assertEquals(testField.hashCode(), deserializedCopy.hashCode()); assertNotSame(testField, deserializedCopy); diff --git a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java index afba40e2cb752..6d74127f071cb 100644 --- a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -52,6 +52,7 @@ import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalSettingsPlugin; +import org.elasticsearch.test.junit.annotations.TestIssueLogging; import java.io.IOException; import java.time.Instant; @@ -793,14 +794,16 @@ public void testFuzzyQueryString() { assertFirstHit(searchResponse, hasId("1")); } - public void testQuotedQueryStringWithBoost() throws InterruptedException, ExecutionException { + @TestIssueLogging(value = "org.elasticsearch.search.query.SearchQueryIT:DEBUG", + issueUrl = "https://github.com/elastic/elasticsearch/issues/43144") + public void testQuotedQueryStringWithBoost() throws InterruptedException { float boost = 10.0f; assertAcked(prepareCreate("test").setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1))); - indexRandom(true, - client().prepareIndex("test", "type1", "1").setSource("important", "phrase match", "less_important", "nothing important"), - client().prepareIndex("test", "type1", "2").setSource("important", "nothing important", "less_important", "phrase match") - ); + indexRandom(true, false, + client().prepareIndex("test", "type1", "1").setSource("important", "phrase match", "less_important", "nothing important"), + client().prepareIndex("test", "type1", "2").setSource("important", "nothing important", "less_important", "phrase match") + ); SearchResponse searchResponse = client().prepareSearch() .setQuery(queryStringQuery("\"phrase match\"").field("important", boost).field("less_important")).get(); @@ -808,13 +811,6 @@ public void testQuotedQueryStringWithBoost() throws InterruptedException, Execut assertFirstHit(searchResponse, hasId("1")); assertSecondHit(searchResponse, hasId("2")); assertThat((double)searchResponse.getHits().getAt(0).getScore(), closeTo(boost * searchResponse.getHits().getAt(1).getScore(), .1)); - - searchResponse = client().prepareSearch() - .setQuery(queryStringQuery("\"phrase match\"").field("important", boost).field("less_important")).get(); - assertHitCount(searchResponse, 2L); - assertFirstHit(searchResponse, hasId("1")); - 
assertSecondHit(searchResponse, hasId("2")); - assertThat((double)searchResponse.getHits().getAt(0).getScore(), closeTo(boost * searchResponse.getHits().getAt(1).getScore(), .1)); } public void testSpecialRangeSyntaxInQueryString() { diff --git a/server/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java b/server/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java index e7675a9cbb146..094aaa5d1cdd7 100644 --- a/server/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java +++ b/server/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java @@ -83,7 +83,7 @@ public class SimpleQueryStringIT extends ESIntegTestCase { @BeforeClass public static void createRandomClusterSetting() { - CLUSTER_MAX_CLAUSE_COUNT = randomIntBetween(50, 100); + CLUSTER_MAX_CLAUSE_COUNT = randomIntBetween(60, 100); } @Override diff --git a/server/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java b/server/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java index 4febd0695c936..882b3cc4b1e86 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java @@ -35,8 +35,8 @@ import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.translog.BufferedChecksumStreamOutput; +import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil; import org.elasticsearch.repositories.blobstore.ChecksumBlobStoreFormat; import org.elasticsearch.snapshots.mockstore.BlobContainerWrapper; @@ -110,24 +110,17 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par public void testBlobStoreOperations() throws IOException { BlobStore blobStore = createTestBlobStore(); BlobContainer blobContainer = blobStore.blobContainer(BlobPath.cleanPath()); - ChecksumBlobStoreFormat checksumJSON = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj::fromXContent, - xContentRegistry(), false, XContentType.JSON); ChecksumBlobStoreFormat checksumSMILE = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj::fromXContent, - xContentRegistry(), false, XContentType.SMILE); + xContentRegistry(), false); ChecksumBlobStoreFormat checksumSMILECompressed = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj::fromXContent, - xContentRegistry(), true, XContentType.SMILE); + xContentRegistry(), true); // Write blobs in different formats - checksumJSON.write(new BlobObj("checksum json"), blobContainer, "check-json"); checksumSMILE.write(new BlobObj("checksum smile"), blobContainer, "check-smile"); checksumSMILECompressed.write(new BlobObj("checksum smile compressed"), blobContainer, "check-smile-comp"); // Assert that all checksum blobs can be read by all formats - assertEquals(checksumJSON.read(blobContainer, "check-json").getText(), "checksum json"); - assertEquals(checksumSMILE.read(blobContainer, "check-json").getText(), "checksum json"); - assertEquals(checksumJSON.read(blobContainer, "check-smile").getText(), "checksum smile"); assertEquals(checksumSMILE.read(blobContainer, "check-smile").getText(), "checksum smile"); - assertEquals(checksumJSON.read(blobContainer, "check-smile-comp").getText(), "checksum smile compressed"); assertEquals(checksumSMILE.read(blobContainer, "check-smile-comp").getText(), "checksum smile compressed"); } @@ 
-139,9 +132,9 @@ public void testCompressionIsApplied() throws IOException { veryRedundantText.append("Blah "); } ChecksumBlobStoreFormat checksumFormat = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj::fromXContent, - xContentRegistry(), false, randomBoolean() ? XContentType.SMILE : XContentType.JSON); + xContentRegistry(), false); ChecksumBlobStoreFormat checksumFormatComp = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj::fromXContent, - xContentRegistry(), true, randomBoolean() ? XContentType.SMILE : XContentType.JSON); + xContentRegistry(), true); BlobObj blobObj = new BlobObj(veryRedundantText.toString()); checksumFormatComp.write(blobObj, blobContainer, "blob-comp"); checksumFormat.write(blobObj, blobContainer, "blob-not-comp"); @@ -156,7 +149,7 @@ public void testBlobCorruption() throws IOException { String testString = randomAlphaOfLength(randomInt(10000)); BlobObj blobObj = new BlobObj(testString); ChecksumBlobStoreFormat checksumFormat = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj::fromXContent, - xContentRegistry(), randomBoolean(), randomBoolean() ? XContentType.SMILE : XContentType.JSON); + xContentRegistry(), randomBoolean()); checksumFormat.write(blobObj, blobContainer, "test-path"); assertEquals(checksumFormat.read(blobContainer, "test-path").getText(), testString); randomCorruption(blobContainer, "test-path"); @@ -191,7 +184,7 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par } }; final ChecksumBlobStoreFormat checksumFormat = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj::fromXContent, - xContentRegistry(), randomBoolean(), randomBoolean() ? XContentType.SMILE : XContentType.JSON); + xContentRegistry(), randomBoolean()); ExecutorService threadPool = Executors.newFixedThreadPool(1); try { Future future = threadPool.submit(new Callable() { @@ -201,11 +194,12 @@ public Void call() throws Exception { return null; } }); + // signalling block.await(5, TimeUnit.SECONDS); - assertFalse(blobContainer.blobExists("test-blob")); + assertFalse(BlobStoreTestUtil.blobExists(blobContainer, "test-blob")); unblock.countDown(); future.get(); - assertTrue(blobContainer.blobExists("test-blob")); + assertTrue(BlobStoreTestUtil.blobExists(blobContainer, "test-blob")); } finally { threadPool.shutdown(); } @@ -215,7 +209,7 @@ public void testAtomicWriteFailures() throws Exception { final String name = randomAlphaOfLength(10); final BlobObj blobObj = new BlobObj("test"); final ChecksumBlobStoreFormat checksumFormat = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj::fromXContent, - xContentRegistry(), randomBoolean(), randomBoolean() ? 
XContentType.SMILE : XContentType.JSON); + xContentRegistry(), randomBoolean()); final BlobStore blobStore = createTestBlobStore(); final BlobContainer blobContainer = blobStore.blobContainer(BlobPath.cleanPath()); diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 6ef892a74bb5a..ceff9f2b55bf5 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.snapshots; +import org.apache.lucene.util.Constants; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; @@ -3513,6 +3514,7 @@ public void testSnapshotSucceedsAfterSnapshotFailure() throws Exception { } public void testSnapshotStatusOnFailedIndex() throws Exception { + assumeFalse("https://github.com/elastic/elasticsearch/issues/44671", Constants.WINDOWS); logger.info("--> creating repository"); final Path repoPath = randomRepoPath(); final Client client = client(); diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index 9f1e7ddce6b6d..289d707a1e4d3 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -108,6 +108,7 @@ import org.elasticsearch.cluster.service.ClusterApplierService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.MasterService; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.ClusterSettings; @@ -152,6 +153,7 @@ import org.elasticsearch.search.SearchService; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.FetchPhase; +import org.elasticsearch.snapshots.mockstore.MockEventuallyConsistentRepository; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.disruption.DisruptableMockTransport; import org.elasticsearch.test.disruption.NetworkDisruption; @@ -203,9 +205,18 @@ public class SnapshotResiliencyTests extends ESTestCase { private Path tempDir; + /** + * Context shared by all the node's {@link Repository} instances if the eventually consistent blobstore is to be used. + * {@code null} if not using the eventually consistent blobstore. 
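+ * A single context is created per test run and handed to every repository produced by getRepoFactory, so all nodes in the simulated cluster share one view of the blob store.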
+ */ + @Nullable private MockEventuallyConsistentRepository.Context blobStoreContext; + @Before public void createServices() { tempDir = createTempDir(); + if (randomBoolean()) { + blobStoreContext = new MockEventuallyConsistentRepository.Context(); + } deterministicTaskQueue = new DeterministicTaskQueue(Settings.builder().put(NODE_NAME_SETTING.getKey(), "shared").build(), random()); } @@ -213,6 +224,9 @@ public void createServices() { @After public void verifyReposThenStopServices() { try { + if (blobStoreContext != null) { + blobStoreContext.forceConsistent(); + } BlobStoreTestUtil.assertConsistency( (BlobStoreRepository) testClusterNodes.randomMasterNodeSafe().repositoriesService.repository("repo"), Runnable::run); @@ -900,19 +914,7 @@ public void onFailure(final Exception e) { final IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); repositoriesService = new RepositoriesService( settings, clusterService, transportService, - Collections.singletonMap(FsRepository.TYPE, metaData -> { - final Repository repository = new FsRepository(metaData, environment, xContentRegistry(), threadPool) { - @Override - protected void assertSnapshotOrGenericThread() { - // eliminate thread name check as we create repo in the test thread - } - }; - repository.start(); - return repository; - } - ), - emptyMap(), - threadPool + Collections.singletonMap(FsRepository.TYPE, getRepoFactory(environment)), emptyMap(), threadPool ); snapshotsService = new SnapshotsService(settings, clusterService, indexNameExpressionResolver, repositoriesService, threadPool); @@ -1066,9 +1068,9 @@ searchTransportService, new SearchPhaseController(searchService::createReduceCon actions.put(IndicesShardStoresAction.INSTANCE, new TransportIndicesShardStoresAction( transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, - new TransportNodesListGatewayStartedShards(settings, - threadPool, clusterService, transportService, actionFilters, nodeEnv, indicesService, namedXContentRegistry)) - ); + client)); + actions.put(TransportNodesListGatewayStartedShards.TYPE, new TransportNodesListGatewayStartedShards(settings, + threadPool, clusterService, transportService, actionFilters, nodeEnv, indicesService, namedXContentRegistry)); actions.put(DeleteSnapshotAction.INSTANCE, new TransportDeleteSnapshotAction( transportService, clusterService, threadPool, @@ -1077,6 +1079,28 @@ searchTransportService, new SearchPhaseController(searchService::createReduceCon client.initialize(actions, () -> clusterService.localNode().getId(), transportService.getRemoteClusterService()); } + private Repository.Factory getRepoFactory(Environment environment) { + // Run half the tests with the eventually consistent repository + if (blobStoreContext == null) { + return metaData -> { + final Repository repository = new FsRepository(metaData, environment, xContentRegistry(), threadPool) { + @Override + protected void assertSnapshotOrGenericThread() { + // eliminate thread name check as we create repo in the test thread + } + }; + repository.start(); + return repository; + }; + } else { + return metaData -> { + final Repository repository = new MockEventuallyConsistentRepository( + metaData, environment, xContentRegistry(), deterministicTaskQueue.getThreadPool(), blobStoreContext); + repository.start(); + return repository; + }; + } + } public void restart() { testClusterNodes.disconnectNode(this); final ClusterState oldState = this.clusterService.state(); diff --git 
a/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java b/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java index df1d003aa55a5..15faecf46ca40 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java +++ b/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java @@ -38,11 +38,6 @@ public BlobPath path() { return delegate.path(); } - @Override - public boolean blobExists(String blobName) { - return delegate.blobExists(blobName); - } - @Override public InputStream readBlob(String name) throws IOException { return delegate.readBlob(name); diff --git a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepository.java b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepository.java new file mode 100644 index 0000000000000..d21f3db81e69c --- /dev/null +++ b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepository.java @@ -0,0 +1,334 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.snapshots.mockstore; + +import org.apache.lucene.codecs.CodecUtil; +import org.elasticsearch.cluster.metadata.RepositoryMetaData; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.blobstore.BlobContainer; +import org.elasticsearch.common.blobstore.BlobMetaData; +import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.util.Maps; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.env.Environment; +import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.snapshots.SnapshotInfo; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.NoSuchFileException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; + +/** + * Mock Repository that allows testing the eventually consistent behaviour of AWS S3 as documented in the + * AWS S3 docs. + * Currently, the repository asserts that no inconsistent reads are made. + * TODO: Resolve todos on list and overwrite operation consistency to fully cover S3's behavior. + */ +public class MockEventuallyConsistentRepository extends BlobStoreRepository { + + private final Context context; + + private final NamedXContentRegistry namedXContentRegistry; + + public MockEventuallyConsistentRepository(RepositoryMetaData metadata, Environment environment, + NamedXContentRegistry namedXContentRegistry, ThreadPool threadPool, Context context) { + super(metadata, environment.settings(), namedXContentRegistry, threadPool, BlobPath.cleanPath()); + this.context = context; + this.namedXContentRegistry = namedXContentRegistry; + } + + // Filters out all actions that are superseded by subsequent actions + // TODO: Remove all usages of this method, snapshots should not depend on consistent list operations + private static List consistentView(List actions) { + final Map lastActions = new HashMap<>(); + for (BlobStoreAction action : actions) { + if (action.operation == Operation.PUT) { + lastActions.put(action.path, action); + } else if (action.operation == Operation.DELETE) { + lastActions.remove(action.path); + } + } + return List.copyOf(lastActions.values()); + } + + @Override + protected void assertSnapshotOrGenericThread() { + // eliminate thread name check as we create repo in the test thread + } + + @Override + protected BlobStore createBlobStore() { + return new MockBlobStore(); + } + + /** + * Context that must be shared between all instances of {@link MockEventuallyConsistentRepository} in a test run.
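+ * Every PUT, GET and DELETE against the mock blob store is recorded here as a BlobStoreAction; consistentView replays that log to derive the set of blobs a fully consistent store would expose.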
+ */ + public static final class Context { + + private final List actions = new ArrayList<>(); + + /** + * Force the repository into a consistent end state so that its eventual state can be examined. + */ + public void forceConsistent() { + synchronized (actions) { + final List consistentActions = consistentView(actions); + actions.clear(); + actions.addAll(consistentActions); + } + } + } + + private enum Operation { + PUT, GET, DELETE + } + + private static final class BlobStoreAction { + + private final Operation operation; + + @Nullable + private final byte[] data; + + private final String path; + + private BlobStoreAction(Operation operation, String path, byte[] data) { + this.operation = operation; + this.path = path; + this.data = data; + } + + private BlobStoreAction(Operation operation, String path) { + this(operation, path, null); + } + } + + private class MockBlobStore implements BlobStore { + + private AtomicBoolean closed = new AtomicBoolean(false); + + @Override + public BlobContainer blobContainer(BlobPath path) { + return new MockBlobContainer(path); + } + + @Override + public void close() { + closed.set(true); + } + + private void ensureNotClosed() { + if (closed.get()) { + throw new AssertionError("Blobstore is closed already"); + } + } + + private class MockBlobContainer implements BlobContainer { + + private final BlobPath path; + + MockBlobContainer(BlobPath path) { + this.path = path; + } + + @Override + public BlobPath path() { + return path; + } + + @Override + public InputStream readBlob(String name) throws NoSuchFileException { + ensureNotClosed(); + final String blobPath = path.buildAsString() + name; + synchronized (context.actions) { + final List relevantActions = relevantActions(blobPath); + context.actions.add(new BlobStoreAction(Operation.GET, blobPath)); + if (relevantActions.stream().noneMatch(a -> a.operation == Operation.PUT)) { + throw new NoSuchFileException(blobPath); + } + if (relevantActions.size() == 1 && relevantActions.get(0).operation == Operation.PUT) { + // Consistent read after write + return new ByteArrayInputStream(relevantActions.get(0).data); + } + throw new AssertionError("Inconsistent read on [" + blobPath + ']'); + } + } + + private List relevantActions(String blobPath) { + assert Thread.holdsLock(context.actions); + final List relevantActions = new ArrayList<>( + context.actions.stream().filter(action -> blobPath.equals(action.path)).collect(Collectors.toList())); + for (int i = relevantActions.size() - 1; i > 0; i--) { + if (relevantActions.get(i).operation == Operation.GET) { + relevantActions.remove(i); + } else { + break; + } + } + return relevantActions; + } + + @Override + public void deleteBlob(String blobName) { + ensureNotClosed(); + synchronized (context.actions) { + context.actions.add(new BlobStoreAction(Operation.DELETE, path.buildAsString() + blobName)); + } + } + + @Override + public void delete() { + ensureNotClosed(); + final String thisPath = path.buildAsString(); + synchronized (context.actions) { + consistentView(context.actions).stream().filter(action -> action.path.startsWith(thisPath)) + .forEach(a -> context.actions.add(new BlobStoreAction(Operation.DELETE, a.path))); + } + } + + @Override + public Map listBlobs() { + ensureNotClosed(); + final String thisPath = path.buildAsString(); + synchronized (context.actions) { + return consistentView(context.actions).stream() + .filter( + action -> action.path.startsWith(thisPath) && action.path.substring(thisPath.length()).indexOf('/') == -1 + && action.operation == 
Operation.PUT) + .collect( + Collectors.toMap( + action -> action.path.substring(thisPath.length()), + action -> new PlainBlobMetaData(action.path.substring(thisPath.length()), action.data.length))); + } + } + + @Override + public Map children() { + ensureNotClosed(); + final String thisPath = path.buildAsString(); + synchronized (context.actions) { + return consistentView(context.actions).stream() + .filter(action -> + action.operation == Operation.PUT + && action.path.startsWith(thisPath) && action.path.substring(thisPath.length()).indexOf('/') != -1) + .map(action -> action.path.substring(thisPath.length()).split("/")[0]) + .distinct() + .collect(Collectors.toMap(Function.identity(), name -> new MockBlobContainer(path.add(name)))); + } + } + + @Override + public Map listBlobsByPrefix(String blobNamePrefix) { + return Maps.ofEntries( + listBlobs().entrySet().stream().filter(entry -> entry.getKey().startsWith(blobNamePrefix)).collect(Collectors.toList()) + ); + } + + @Override + public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) + throws IOException { + ensureNotClosed(); + assert blobSize < Integer.MAX_VALUE; + final byte[] data = new byte[(int) blobSize]; + final int read = inputStream.read(data); + assert read == data.length; + final String blobPath = path.buildAsString() + blobName; + synchronized (context.actions) { + final List relevantActions = relevantActions(blobPath); + // We do some checks in case there is a consistent state for a blob to prevent turning it inconsistent. + final boolean hasConsistentContent = + relevantActions.size() == 1 && relevantActions.get(0).operation == Operation.PUT; + if (BlobStoreRepository.INDEX_LATEST_BLOB.equals(blobName)) { + // TODO: Ensure that it is impossible to ever decrement the generation id stored in index.latest then assert that + // it never decrements here + } else if (blobName.startsWith(BlobStoreRepository.SNAPSHOT_PREFIX)) { + if (hasConsistentContent) { + if (basePath().buildAsString().equals(path().buildAsString())) { + try { + // TODO: dry up the logic for reading SnapshotInfo here against the code in ChecksumBlobStoreFormat + final int offset = CodecUtil.headerLength(BlobStoreRepository.SNAPSHOT_CODEC); + final SnapshotInfo updatedInfo = SnapshotInfo.fromXContentInternal( + XContentHelper.createParser(namedXContentRegistry, LoggingDeprecationHandler.INSTANCE, + new BytesArray(data, offset, data.length - offset - CodecUtil.footerLength()), + XContentType.SMILE)); + // If the existing snapshotInfo differs only in the timestamps it stores, then the overwrite is not + // a problem and could be the result of a correctly handled master failover.
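+ // The assertions below compare the identifying fields one by one; the start and end timestamps are deliberately left out of the comparison.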
+ final SnapshotInfo existingInfo = snapshotFormat.readBlob(this, blobName); + assertThat(existingInfo.snapshotId(), equalTo(updatedInfo.snapshotId())); + assertThat(existingInfo.reason(), equalTo(updatedInfo.reason())); + assertThat(existingInfo.state(), equalTo(updatedInfo.state())); + assertThat(existingInfo.totalShards(), equalTo(updatedInfo.totalShards())); + assertThat(existingInfo.successfulShards(), equalTo(updatedInfo.successfulShards())); + assertThat( + existingInfo.shardFailures(), containsInAnyOrder(updatedInfo.shardFailures().toArray())); + assertThat(existingInfo.indices(), equalTo(updatedInfo.indices())); + return; // No need to add a write for this since we didn't change content + } catch (Exception e) { + // Rethrow as AssertionError here since this kind of exception might otherwise be swallowed and logged by + // the blob store repository. + // Since we are not doing any actual IO we don't expect this to ever throw and an exception would + // signal broken SnapshotInfo bytes or unexpected behavior of SnapshotInfo otherwise. + throw new AssertionError("Failed to deserialize SnapshotInfo", e); + } + } else { + // Primaries never retry so any shard level snap- blob retry/overwrite even with the same content is + // not expected. + throw new AssertionError("Shard level snap-{uuid} blobs should never be overwritten"); + } + } + } else { + if (hasConsistentContent) { + ESTestCase.assertArrayEquals("Tried to overwrite blob [" + blobName + "]", relevantActions.get(0).data, data); + return; // No need to add a write for this since we didn't change content + } + } + context.actions.add(new BlobStoreAction(Operation.PUT, blobPath, data)); + } + } + + @Override + public void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize, + final boolean failIfAlreadyExists) throws IOException { + writeBlob(blobName, inputStream, blobSize, failIfAlreadyExists); + } + } + } +} diff --git a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepositoryTests.java b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepositoryTests.java new file mode 100644 index 0000000000000..81934fe93bd8a --- /dev/null +++ b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepositoryTests.java @@ -0,0 +1,181 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.snapshots.mockstore; + +import org.elasticsearch.cluster.metadata.RepositoryMetaData; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.blobstore.BlobContainer; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.snapshots.SnapshotId; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.NoSuchFileException; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.Collections; + +import static org.elasticsearch.env.Environment.PATH_HOME_SETTING; +import static org.elasticsearch.node.Node.NODE_NAME_SETTING; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.startsWith; +import static org.mockito.Mockito.mock; + +public class MockEventuallyConsistentRepositoryTests extends ESTestCase { + + private Environment environment; + + @Override + public void setUp() throws Exception { + super.setUp(); + final Path tempDir = createTempDir(); + final String nodeName = "testNode"; + environment = TestEnvironment.newEnvironment(Settings.builder() + .put(NODE_NAME_SETTING.getKey(), nodeName) + .put(PATH_HOME_SETTING.getKey(), tempDir.resolve(nodeName).toAbsolutePath()) + .put(Environment.PATH_REPO_SETTING.getKey(), tempDir.resolve("repo").toAbsolutePath()) + .build()); + } + + public void testReadAfterWriteConsistently() throws IOException { + MockEventuallyConsistentRepository.Context blobStoreContext = new MockEventuallyConsistentRepository.Context(); + try (BlobStoreRepository repository = new MockEventuallyConsistentRepository( + new RepositoryMetaData("testRepo", "mockEventuallyConsistent", Settings.EMPTY), environment, + xContentRegistry(), mock(ThreadPool.class), blobStoreContext)) { + repository.start(); + final BlobContainer blobContainer = repository.blobStore().blobContainer(repository.basePath()); + final String blobName = randomAlphaOfLength(10); + final int lengthWritten = randomIntBetween(1, 100); + final byte[] blobData = randomByteArrayOfLength(lengthWritten); + blobContainer.writeBlob(blobName, new ByteArrayInputStream(blobData), lengthWritten, true); + try (InputStream in = blobContainer.readBlob(blobName)) { + final byte[] readBytes = new byte[lengthWritten + 1]; + final int lengthSeen = in.read(readBytes); + assertThat(lengthSeen, equalTo(lengthWritten)); + assertArrayEquals(blobData, Arrays.copyOf(readBytes, lengthWritten)); + } + } + } + + public void testReadAfterWriteAfterReadThrows() throws IOException { + MockEventuallyConsistentRepository.Context blobStoreContext = new MockEventuallyConsistentRepository.Context(); + try (BlobStoreRepository repository = new MockEventuallyConsistentRepository( + new RepositoryMetaData("testRepo", "mockEventuallyConsistent", Settings.EMPTY), environment, + xContentRegistry(), mock(ThreadPool.class), blobStoreContext)) { + repository.start(); + final BlobContainer blobContainer = repository.blobStore().blobContainer(repository.basePath()); + final String blobName = randomAlphaOfLength(10); + final int lengthWritten = randomIntBetween(1, 100); + final byte[] blobData = randomByteArrayOfLength(lengthWritten); + expectThrows(NoSuchFileException.class, () -> blobContainer.readBlob(blobName)); + 
blobContainer.writeBlob(blobName, new ByteArrayInputStream(blobData), lengthWritten, true); + assertThrowsOnInconsistentRead(blobContainer, blobName); + } + } + + public void testReadAfterDeleteAfterWriteThrows() throws IOException { + MockEventuallyConsistentRepository.Context blobStoreContext = new MockEventuallyConsistentRepository.Context(); + try (BlobStoreRepository repository = new MockEventuallyConsistentRepository( + new RepositoryMetaData("testRepo", "mockEventuallyConsistent", Settings.EMPTY), environment, + xContentRegistry(), mock(ThreadPool.class), blobStoreContext)) { + repository.start(); + final BlobContainer blobContainer = repository.blobStore().blobContainer(repository.basePath()); + final String blobName = randomAlphaOfLength(10); + final int lengthWritten = randomIntBetween(1, 100); + final byte[] blobData = randomByteArrayOfLength(lengthWritten); + blobContainer.writeBlob(blobName, new ByteArrayInputStream(blobData), lengthWritten, true); + blobContainer.deleteBlob(blobName); + assertThrowsOnInconsistentRead(blobContainer, blobName); + blobStoreContext.forceConsistent(); + expectThrows(NoSuchFileException.class, () -> blobContainer.readBlob(blobName)); + } + } + + public void testOverwriteRandomBlobFails() throws IOException { + MockEventuallyConsistentRepository.Context blobStoreContext = new MockEventuallyConsistentRepository.Context(); + try (BlobStoreRepository repository = new MockEventuallyConsistentRepository( + new RepositoryMetaData("testRepo", "mockEventuallyConsistent", Settings.EMPTY), environment, + xContentRegistry(), mock(ThreadPool.class), blobStoreContext)) { + repository.start(); + final BlobContainer container = repository.blobStore().blobContainer(repository.basePath()); + final String blobName = randomAlphaOfLength(10); + final int lengthWritten = randomIntBetween(1, 100); + final byte[] blobData = randomByteArrayOfLength(lengthWritten); + container.writeBlob(blobName, new ByteArrayInputStream(blobData), lengthWritten, false); + final AssertionError assertionError = expectThrows(AssertionError.class, + () -> container.writeBlob(blobName, new ByteArrayInputStream(blobData), lengthWritten - 1, false)); + assertThat(assertionError.getMessage(), startsWith("Tried to overwrite blob [" + blobName +"]")); + } + } + + public void testOverwriteShardSnapBlobFails() throws IOException { + MockEventuallyConsistentRepository.Context blobStoreContext = new MockEventuallyConsistentRepository.Context(); + try (BlobStoreRepository repository = new MockEventuallyConsistentRepository( + new RepositoryMetaData("testRepo", "mockEventuallyConsistent", Settings.EMPTY), environment, + xContentRegistry(), mock(ThreadPool.class), blobStoreContext)) { + repository.start(); + final BlobContainer container = + repository.blobStore().blobContainer(repository.basePath().add("indices").add("someindex").add("0")); + final String blobName = BlobStoreRepository.SNAPSHOT_PREFIX + UUIDs.randomBase64UUID(); + final int lengthWritten = randomIntBetween(1, 100); + final byte[] blobData = randomByteArrayOfLength(lengthWritten); + container.writeBlob(blobName, new ByteArrayInputStream(blobData), lengthWritten, false); + final AssertionError assertionError = expectThrows(AssertionError.class, + () -> container.writeBlob(blobName, new ByteArrayInputStream(blobData), lengthWritten, false)); + assertThat(assertionError.getMessage(), equalTo("Shard level snap-{uuid} blobs should never be overwritten")); + } + } + + public void testOverwriteSnapshotInfoBlob() { + 
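// Outline of this test: root-level snap-{uuid} blobs may be overwritten, but only when the newly + // written SnapshotInfo is equivalent to the existing content (timestamps aside); differing content must fail. +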
MockEventuallyConsistentRepository.Context blobStoreContext = new MockEventuallyConsistentRepository.Context(); + try (BlobStoreRepository repository = new MockEventuallyConsistentRepository( + new RepositoryMetaData("testRepo", "mockEventuallyConsistent", Settings.EMPTY), environment, + xContentRegistry(), mock(ThreadPool.class), blobStoreContext)) { + repository.start(); + + // We create a snap- blob for snapshot "foo" in the first generation + final SnapshotId snapshotId = new SnapshotId("foo", UUIDs.randomBase64UUID()); + repository.finalizeSnapshot(snapshotId, Collections.emptyList(), 1L, null, 5, Collections.emptyList(), + -1L, false, Collections.emptyMap()); + + // We try to write another snap- blob for "foo" in the next generation. It fails because the content differs. + final AssertionError assertionError = expectThrows(AssertionError.class, + () -> repository.finalizeSnapshot( + snapshotId, Collections.emptyList(), 1L, null, 6, Collections.emptyList(), + 0, false, Collections.emptyMap())); + assertThat(assertionError.getMessage(), equalTo("\nExpected: <6>\n but: was <5>")); + + // We try to write yet another snap- blob for "foo" in the next generation. + // It passes cleanly because the content of the blob is unchanged except for the timestamps. + repository.finalizeSnapshot(snapshotId, Collections.emptyList(), 1L, null, 5, Collections.emptyList(), + 0, false, Collections.emptyMap()); + } + } + + private static void assertThrowsOnInconsistentRead(BlobContainer blobContainer, String blobName) { + final AssertionError assertionError = expectThrows(AssertionError.class, () -> blobContainer.readBlob(blobName)); + assertThat(assertionError.getMessage(), equalTo("Inconsistent read on [" + blobName + ']')); + } +} diff --git a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java index 21ade636f53df..a552e7ac54664 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java +++ b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java @@ -311,11 +311,6 @@ private void blockExecutionAndFail(final String blobName) throws IOException { super(delegate); } - @Override - public boolean blobExists(String blobName) { - return super.blobExists(blobName); - } - @Override public InputStream readBlob(String name) throws IOException { maybeIOExceptionOrBlock(name); diff --git a/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java b/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java index 01b09669fae2f..a201b3210376c 100644 --- a/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java @@ -170,11 +170,6 @@ private TestRequest(StreamInput in) throws IOException { this.value = in.readString(); } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -195,11 +190,6 @@ private TestResponse(StreamInput in) throws IOException { this.value = in.readString(); } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(value); diff --git a/server/src/test/java/org/elasticsearch/transport/InboundMessageTests.java
b/server/src/test/java/org/elasticsearch/transport/InboundMessageTests.java index 2615a3fdc35a9..4bace54501b1c 100644 --- a/server/src/test/java/org/elasticsearch/transport/InboundMessageTests.java +++ b/server/src/test/java/org/elasticsearch/transport/InboundMessageTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.transport; import org.elasticsearch.Version; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -28,6 +29,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; +import org.hamcrest.Matchers; import java.io.IOException; import java.util.Arrays; @@ -88,7 +90,7 @@ public void testReadResponse() throws IOException { boolean compress = randomBoolean(); threadContext.putHeader("header", "header_value"); Version version = randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()); - OutboundMessage.Response request = new OutboundMessage.Response(threadContext, features, message, version, requestId, isHandshake, + OutboundMessage.Response request = new OutboundMessage.Response(threadContext, message, version, requestId, isHandshake, compress); BytesReference reference; try (BytesStreamOutput streamOutput = new BytesStreamOutput()) { @@ -124,7 +126,7 @@ public void testReadErrorResponse() throws IOException { boolean compress = randomBoolean(); threadContext.putHeader("header", "header_value"); Version version = randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()); - OutboundMessage.Response request = new OutboundMessage.Response(threadContext, features, exception, version, requestId, + OutboundMessage.Response request = new OutboundMessage.Response(threadContext, exception, version, requestId, isHandshake, compress); BytesReference reference; try (BytesStreamOutput streamOutput = new BytesStreamOutput()) { @@ -181,6 +183,24 @@ public void testEnsureVersionCompatibility() throws IOException { + version.minimumCompatibilityVersion() + "]", ise.getMessage()); } + public void testThrowOnNotCompressed() throws Exception { + OutboundMessage.Response request = new OutboundMessage.Response( + threadContext, new Message(randomAlphaOfLength(10)), Version.CURRENT, randomLong(), false, false); + BytesReference reference; + try (BytesStreamOutput streamOutput = new BytesStreamOutput()) { + reference = request.serialize(streamOutput); + } + final byte[] serialized = BytesReference.toBytes(reference); + final int statusPosition = TcpHeader.HEADER_SIZE - TcpHeader.VERSION_ID_SIZE - 1; + // force status byte to signal compressed on the otherwise uncompressed message + serialized[statusPosition] = TransportStatus.setCompress(serialized[statusPosition]); + reference = new BytesArray(serialized); + InboundMessage.Reader reader = new InboundMessage.Reader(Version.CURRENT, registry, threadContext); + BytesReference sliced = reference.slice(6, reference.length() - 6); + final IllegalStateException iste = expectThrows(IllegalStateException.class, () -> reader.deserialize(sliced)); + assertThat(iste.getMessage(), Matchers.startsWith("stream marked as compressed, but no compressor found,")); + } + private void testVersionIncompatibility(Version version, Version currentVersion, boolean isHandshake) throws IOException { String[] features = {}; String value = randomAlphaOfLength(10); @@ -204,22 
+224,14 @@ private static final class Message extends TransportMessage { public String value; - private Message() { - } - private Message(StreamInput in) throws IOException { - readFrom(in); + value = in.readString(); } private Message(String value) { this.value = value; } - @Override - public void readFrom(StreamInput in) throws IOException { - value = in.readString(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(value); diff --git a/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java b/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java index baab504e61fa4..963ba63e1c3e8 100644 --- a/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java @@ -158,8 +158,7 @@ public void onRequestSent(DiscoveryNode node, long requestId, String action, Tra InboundMessage.Request inboundRequest = (InboundMessage.Request) inboundMessage; assertThat(inboundRequest.getFeatures(), contains(feature1, feature2)); - Request readMessage = new Request(); - readMessage.readFrom(inboundMessage.getStreamInput()); + Request readMessage = new Request(inboundMessage.getStreamInput()); assertEquals(value, readMessage.value); try (ThreadContext.StoredContext existing = threadContext.stashContext()) { @@ -193,7 +192,7 @@ public void onResponseSent(long requestId, String action, TransportResponse resp responseRef.set(response); } }); - handler.sendResponse(version, Collections.emptySet(), channel, requestId, action, response, compress, isHandshake); + handler.sendResponse(version, channel, requestId, action, response, compress, isHandshake); BytesReference reference = channel.getMessageCaptor().get(); ActionListener sendListener = channel.getListenerCaptor().get(); @@ -226,8 +225,7 @@ public void onResponseSent(long requestId, String action, TransportResponse resp InboundMessage.Response inboundResponse = (InboundMessage.Response) inboundMessage; assertFalse(inboundResponse.isError()); - Response readMessage = new Response(); - readMessage.readFrom(inboundMessage.getStreamInput()); + Response readMessage = new Response(inboundMessage.getStreamInput()); assertEquals(value, readMessage.value); try (ThreadContext.StoredContext existing = threadContext.stashContext()) { @@ -258,7 +256,7 @@ public void onResponseSent(long requestId, String action, Exception error) { responseRef.set(error); } }); - handler.sendErrorResponse(version, Collections.emptySet(), channel, requestId, action, error); + handler.sendErrorResponse(version, channel, requestId, action, error); BytesReference reference = channel.getMessageCaptor().get(); ActionListener sendListener = channel.getListenerCaptor().get(); @@ -302,18 +300,14 @@ private static final class Request extends TransportRequest { public String value; - private Request() { + private Request(StreamInput in) throws IOException { + value = in.readString(); } private Request(String value) { this.value = value; } - @Override - public void readFrom(StreamInput in) throws IOException { - value = in.readString(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(value); @@ -324,18 +318,15 @@ private static final class Response extends TransportResponse { public String value; - private Response() { + private Response(StreamInput in) throws IOException { + super(in); + value = in.readString(); } private Response(String value) { this.value = value; } - @Override - public void 
readFrom(StreamInput in) throws IOException { - value = in.readString(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(value); diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java index 6e9c2e4eaf320..3d34751216065 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java @@ -55,7 +55,10 @@ public void testConnectAndExecuteRequest() throws Exception { .put("cluster.remote.test.seeds", remoteNode.getAddress().getAddress() + ":" + remoteNode.getAddress().getPort()).build(); try (MockTransportService service = MockTransportService.createNewService(localSettings, Version.CURRENT, threadPool, null)) { service.start(); + // following two log lines added to investigate #41745, can be removed once issue is closed + logger.info("Start accepting incoming requests on local transport service"); service.acceptIncomingRequests(); + logger.info("now accepting incoming requests on local transport"); RemoteClusterService remoteClusterService = service.getRemoteClusterService(); assertTrue(remoteClusterService.isRemoteNodeConnected("test", remoteNode)); Client client = remoteClusterService.getRemoteClusterClient(threadPool, "test"); diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index d62bd37564d74..37b05dbe128ea 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -63,6 +64,7 @@ import org.elasticsearch.test.transport.StubbableTransport; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.junit.Before; import java.io.IOException; import java.net.InetAddress; @@ -90,6 +92,7 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; +import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -98,6 +101,7 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.sameInstance; import static org.hamcrest.Matchers.startsWith; +import static org.hamcrest.Matchers.endsWith; public class RemoteClusterConnectionTests extends ESTestCase { @@ -110,6 +114,13 @@ public void tearDown() throws Exception { ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); } + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + assumeFalse("https://github.com/elastic/elasticsearch/issues/44339", System.getProperty("os.name").contains("Win")); + } + private MockTransportService startTransport(String id, List knownNodes, Version version) { return startTransport(id, knownNodes, version, threadPool); } @@ -478,9 +489,10 @@ public 
void testConnectWithIncompatibleTransports() throws Exception { public void testRemoteConnectionVersionMatchesTransportConnectionVersion() throws Exception { List knownNodes = new CopyOnWriteArrayList<>(); - final Version previousVersion = VersionUtils.getPreviousVersion(); - try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, previousVersion); - MockTransportService discoverableTransport = startTransport("discoverable_node", knownNodes, Version.CURRENT)) { + final Version previousVersion = randomValueOtherThan(Version.CURRENT, () -> VersionUtils.randomVersionBetween(random(), + Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT)); + try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT); + MockTransportService discoverableTransport = startTransport("discoverable_node", knownNodes, previousVersion)) { DiscoveryNode seedNode = seedTransport.getLocalDiscoNode(); assertThat(seedNode, notNullValue()); @@ -519,12 +531,10 @@ public void sendRequest(long requestId, String action, TransportRequest request, service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", seedNodes(seedNode), service, Integer.MAX_VALUE, n -> true, null, connectionManager)) { - connection.addConnectedNode(seedNode); - for (DiscoveryNode node : knownNodes) { - final Transport.Connection transportConnection = connection.getConnection(node); - assertThat(transportConnection.getVersion(), equalTo(previousVersion)); - } + PlainActionFuture.get(fut -> connection.ensureConnected(ActionListener.map(fut, x -> null))); assertThat(knownNodes, iterableWithSize(2)); + assertThat(connection.getConnection(seedNode).getVersion(), equalTo(Version.CURRENT)); + assertThat(connection.getConnection(oldVersionNode).getVersion(), equalTo(previousVersion)); } } } @@ -979,7 +989,7 @@ public void testConnectedNodesConcurrentAccess() throws IOException, Interrupted discoverableTransports.add(transportService); } - List>> seedNodes = randomSubsetOf(discoverableNodes); + List>> seedNodes = new CopyOnWriteArrayList<>(randomSubsetOf(discoverableNodes)); Collections.shuffle(seedNodes, random()); try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { @@ -1020,11 +1030,14 @@ public void testConnectedNodesConcurrentAccess() throws IOException, Interrupted barrier.await(); for (int j = 0; j < numDisconnects; j++) { if (randomBoolean()) { + String node = "discoverable_node_added" + counter.incrementAndGet(); MockTransportService transportService = - startTransport("discoverable_node_added" + counter.incrementAndGet(), knownNodes, + startTransport(node, knownNodes, Version.CURRENT); discoverableTransports.add(transportService); - connection.addConnectedNode(transportService.getLocalDiscoNode()); + seedNodes.add(Tuple.tuple(node, () -> transportService.getLocalDiscoNode())); + PlainActionFuture.get(fut -> connection.updateSeedNodes(null, seedNodes, + ActionListener.map(fut, x -> null))); } else { DiscoveryNode node = randomFrom(discoverableNodes).v2().get(); connection.onNodeDisconnected(node); @@ -1095,9 +1108,11 @@ public void testClusterNameIsChecked() throws Exception { assertTrue(connection.assertNoRunningConnections()); IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, () -> updateSeedNodes(connection, Arrays.asList(Tuple.tuple("other", otherClusterTransport::getLocalDiscoNode)))); - 
assertThat(illegalStateException.getMessage(), - startsWith("handshake failed, mismatched cluster name [Cluster [otherCluster]]" + - " - {other_cluster_discoverable_node}")); + assertThat(illegalStateException.getMessage(), allOf( + startsWith("handshake with [{other_cluster_discoverable_node}"), + containsString(otherClusterTransport.getLocalDiscoNode().toString()), + endsWith(" failed: remote cluster name [otherCluster] " + + "does not match expected remote cluster name [testClusterNameIsChecked]"))); } } } @@ -1133,8 +1148,7 @@ public void sendRequest(long requestId, String action, TransportRequest request, ConnectionManager delegate = new ConnectionManager(Settings.EMPTY, service.transport); StubbableConnectionManager connectionManager = new StubbableConnectionManager(delegate, Settings.EMPTY, service.transport); - connectionManager.addNodeConnectedBehavior(connectedNode.getAddress(), (cm, discoveryNode) - -> discoveryNode.equals(connectedNode)); + connectionManager.setDefaultNodeConnectedBehavior(cm -> Collections.singleton(connectedNode)); connectionManager.addConnectBehavior(connectedNode.getAddress(), (cm, discoveryNode) -> { if (discoveryNode == connectedNode) { @@ -1146,7 +1160,7 @@ public void sendRequest(long requestId, String action, TransportRequest request, service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", seedNodes(connectedNode), service, Integer.MAX_VALUE, n -> true, null, connectionManager)) { - connection.addConnectedNode(connectedNode); + PlainActionFuture.get(fut -> connection.ensureConnected(ActionListener.map(fut, x -> null))); for (int i = 0; i < 10; i++) { //always a direct connection as the remote node is already connected Transport.Connection remoteConnection = connection.getConnection(connectedNode); diff --git a/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java b/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java index 96e1da6e8319e..9c4de7f477da9 100644 --- a/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java @@ -83,7 +83,7 @@ private MockTransportService buildService(final Version version) { public void testSendMessage() throws InterruptedException { - serviceA.registerRequestHandler("internal:test", SimpleTestRequest::new, ThreadPool.Names.SAME, + serviceA.registerRequestHandler("internal:test", ThreadPool.Names.SAME, SimpleTestRequest::new, (request, channel, task) -> { assertEquals(request.sourceNode, "TS_A"); SimpleTestResponse response = new SimpleTestResponse("TS_A"); @@ -92,7 +92,7 @@ public void testSendMessage() throws InterruptedException { TransportActionProxy.registerProxyAction(serviceA, "internal:test", SimpleTestResponse::new); serviceA.connectToNode(nodeB); - serviceB.registerRequestHandler("internal:test", SimpleTestRequest::new, ThreadPool.Names.SAME, + serviceB.registerRequestHandler("internal:test", ThreadPool.Names.SAME, SimpleTestRequest::new, (request, channel, task) -> { assertEquals(request.sourceNode, "TS_A"); SimpleTestResponse response = new SimpleTestResponse("TS_B"); @@ -100,7 +100,7 @@ public void testSendMessage() throws InterruptedException { }); TransportActionProxy.registerProxyAction(serviceB, "internal:test", SimpleTestResponse::new); serviceB.connectToNode(nodeC); - serviceC.registerRequestHandler("internal:test", SimpleTestRequest::new, ThreadPool.Names.SAME, + 
serviceC.registerRequestHandler("internal:test", ThreadPool.Names.SAME, SimpleTestRequest::new, (request, channel, task) -> { assertEquals(request.sourceNode, "TS_A"); SimpleTestResponse response = new SimpleTestResponse("TS_C"); @@ -143,7 +143,7 @@ public String executor() { } public void testException() throws InterruptedException { - serviceA.registerRequestHandler("internal:test", SimpleTestRequest::new, ThreadPool.Names.SAME, + serviceA.registerRequestHandler("internal:test", ThreadPool.Names.SAME, SimpleTestRequest::new, (request, channel, task) -> { assertEquals(request.sourceNode, "TS_A"); SimpleTestResponse response = new SimpleTestResponse("TS_A"); @@ -152,7 +152,7 @@ public void testException() throws InterruptedException { TransportActionProxy.registerProxyAction(serviceA, "internal:test", SimpleTestResponse::new); serviceA.connectToNode(nodeB); - serviceB.registerRequestHandler("internal:test", SimpleTestRequest::new, ThreadPool.Names.SAME, + serviceB.registerRequestHandler("internal:test", ThreadPool.Names.SAME, SimpleTestRequest::new, (request, channel, task) -> { assertEquals(request.sourceNode, "TS_A"); SimpleTestResponse response = new SimpleTestResponse("TS_B"); @@ -160,7 +160,7 @@ public void testException() throws InterruptedException { }); TransportActionProxy.registerProxyAction(serviceB, "internal:test", SimpleTestResponse::new); serviceB.connectToNode(nodeC); - serviceC.registerRequestHandler("internal:test", SimpleTestRequest::new, ThreadPool.Names.SAME, + serviceC.registerRequestHandler("internal:test", ThreadPool.Names.SAME, SimpleTestRequest::new, (request, channel, task) -> { throw new ElasticsearchException("greetings from TS_C"); }); @@ -209,9 +209,8 @@ public SimpleTestRequest(String sourceNode) { } public SimpleTestRequest() {} - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); + public SimpleTestRequest(StreamInput in) throws IOException { + super(in); sourceNode = in.readString(); } @@ -234,11 +233,6 @@ public static class SimpleTestResponse extends TransportResponse { this.targetNode = in.readString(); } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(targetNode); diff --git a/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java b/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java index dd46059aa2abf..8187ceba19824 100644 --- a/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java @@ -37,7 +37,7 @@ import static org.mockito.Mockito.mock; -@TestLogging(value = "org.elasticsearch.transport.TransportLogger:trace") +@TestLogging(value = "org.elasticsearch.transport.TransportLogger:trace", reason = "to ensure we log network events on TRACE level") public class TransportLoggerTests extends ESTestCase { private MockLogAppender appender; diff --git a/server/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java b/server/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java index 5e96dd8ec8385..80f80547c1d60 100644 --- a/server/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java @@ -135,9 +135,10 @@ public 
void testMismatchedClusterName() { PlainActionFuture.get(fut -> handleA.transportService.handshake(connection, timeout, ActionListener.map(fut, x -> null))); } }); - assertThat(ex.getMessage(), containsString("handshake failed, mismatched cluster name [Cluster [b]]")); + assertThat(ex.getMessage(), containsString("handshake with [" + discoveryNode + + "] failed: remote cluster name [b] does not match local cluster name [a]")); assertFalse(handleA.transportService.nodeConnected(discoveryNode)); -} + } public void testIncompatibleVersions() { Settings settings = Settings.builder().put("cluster.name", "test").build(); @@ -156,7 +157,9 @@ public void testIncompatibleVersions() { PlainActionFuture.get(fut -> handleA.transportService.handshake(connection, timeout, ActionListener.map(fut, x -> null))); } }); - assertThat(ex.getMessage(), containsString("handshake failed, incompatible version")); + assertThat(ex.getMessage(), containsString("handshake with [" + discoveryNode + + "] failed: remote node version [" + handleB.discoveryNode.getVersion() + "] is incompatible with local node version [" + + Version.CURRENT + "]")); assertFalse(handleA.transportService.nodeConnected(discoveryNode)); } diff --git a/server/src/test/java/org/elasticsearch/update/UpdateNoopIT.java b/server/src/test/java/org/elasticsearch/update/UpdateNoopIT.java index 2cb71d9bcbe0a..12201eca1a51c 100644 --- a/server/src/test/java/org/elasticsearch/update/UpdateNoopIT.java +++ b/server/src/test/java/org/elasticsearch/update/UpdateNoopIT.java @@ -29,6 +29,7 @@ import java.io.IOException; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; /** @@ -36,16 +37,16 @@ */ public class UpdateNoopIT extends ESIntegTestCase { public void testSingleField() throws Exception { - updateAndCheckSource(1, fields("bar", "baz")); - updateAndCheckSource(1, fields("bar", "baz")); - updateAndCheckSource(2, fields("bar", "bir")); - updateAndCheckSource(2, fields("bar", "bir")); - updateAndCheckSource(3, fields("bar", "foo")); - updateAndCheckSource(4, fields("bar", null)); - updateAndCheckSource(4, fields("bar", null)); - updateAndCheckSource(5, fields("bar", "foo")); + updateAndCheckSource(0, 1, fields("bar", "baz")); + updateAndCheckSource(0, 1, fields("bar", "baz")); + updateAndCheckSource(1, 2, fields("bar", "bir")); + updateAndCheckSource(1, 2, fields("bar", "bir")); + updateAndCheckSource(2, 3, fields("bar", "foo")); + updateAndCheckSource(3, 4, fields("bar", null)); + updateAndCheckSource(3, 4, fields("bar", null)); + updateAndCheckSource(4, 5, fields("bar", "foo")); // detect_noop defaults to true - updateAndCheckSource(5, null, fields("bar", "foo")); + updateAndCheckSource(4, 5, null, fields("bar", "foo")); assertEquals(4, totalNoopUpdates()); } @@ -55,36 +56,36 @@ public void testTwoFields() throws Exception { String key1 = 1 + randomAlphaOfLength(3); String key2 = 2 + randomAlphaOfLength(3); String key3 = 3 + randomAlphaOfLength(3); - updateAndCheckSource(1, fields(key1, "foo", key2, "baz")); - updateAndCheckSource(1, fields(key1, "foo", key2, "baz")); - updateAndCheckSource(2, fields(key1, "foo", key2, "bir")); - updateAndCheckSource(2, fields(key1, "foo", key2, "bir")); - updateAndCheckSource(3, fields(key1, "foo", key2, "foo")); - updateAndCheckSource(4, fields(key1, "foo", key2, null)); - updateAndCheckSource(4, fields(key1, "foo", key2, null)); - updateAndCheckSource(5, fields(key1, "foo", key2, "foo")); - updateAndCheckSource(6, fields(key1, null, key2, "foo")); - updateAndCheckSource(6, 
fields(key1, null, key2, "foo")); - updateAndCheckSource(7, fields(key1, null, key2, null)); - updateAndCheckSource(7, fields(key1, null, key2, null)); - updateAndCheckSource(8, fields(key1, null, key2, null, key3, null)); + updateAndCheckSource(0, 1, fields(key1, "foo", key2, "baz")); + updateAndCheckSource(0, 1, fields(key1, "foo", key2, "baz")); + updateAndCheckSource(1, 2, fields(key1, "foo", key2, "bir")); + updateAndCheckSource(1, 2, fields(key1, "foo", key2, "bir")); + updateAndCheckSource(2, 3, fields(key1, "foo", key2, "foo")); + updateAndCheckSource(3, 4, fields(key1, "foo", key2, null)); + updateAndCheckSource(3, 4, fields(key1, "foo", key2, null)); + updateAndCheckSource(4, 5, fields(key1, "foo", key2, "foo")); + updateAndCheckSource(5, 6, fields(key1, null, key2, "foo")); + updateAndCheckSource(5, 6, fields(key1, null, key2, "foo")); + updateAndCheckSource(6, 7, fields(key1, null, key2, null)); + updateAndCheckSource(6, 7, fields(key1, null, key2, null)); + updateAndCheckSource(7, 8, fields(key1, null, key2, null, key3, null)); assertEquals(5, totalNoopUpdates()); } public void testArrayField() throws Exception { - updateAndCheckSource(1, fields("bar", "baz")); - updateAndCheckSource(2, fields("bar", new String[] {"baz", "bort"})); - updateAndCheckSource(2, fields("bar", new String[] {"baz", "bort"})); - updateAndCheckSource(3, fields("bar", "bir")); - updateAndCheckSource(3, fields("bar", "bir")); - updateAndCheckSource(4, fields("bar", new String[] {"baz", "bort"})); - updateAndCheckSource(4, fields("bar", new String[] {"baz", "bort"})); - updateAndCheckSource(5, fields("bar", new String[] {"bir", "bort"})); - updateAndCheckSource(5, fields("bar", new String[] {"bir", "bort"})); - updateAndCheckSource(6, fields("bar", new String[] {"bir", "for"})); - updateAndCheckSource(6, fields("bar", new String[] {"bir", "for"})); - updateAndCheckSource(7, fields("bar", new String[] {"bir", "for", "far"})); + updateAndCheckSource(0, 1, fields("bar", "baz")); + updateAndCheckSource(1, 2, fields("bar", new String[] {"baz", "bort"})); + updateAndCheckSource(1, 2, fields("bar", new String[] {"baz", "bort"})); + updateAndCheckSource(2, 3, fields("bar", "bir")); + updateAndCheckSource(2, 3, fields("bar", "bir")); + updateAndCheckSource(3, 4, fields("bar", new String[] {"baz", "bort"})); + updateAndCheckSource(3, 4, fields("bar", new String[] {"baz", "bort"})); + updateAndCheckSource(4, 5, fields("bar", new String[] {"bir", "bort"})); + updateAndCheckSource(4, 5, fields("bar", new String[] {"bir", "bort"})); + updateAndCheckSource(5, 6, fields("bar", new String[] {"bir", "for"})); + updateAndCheckSource(5, 6, fields("bar", new String[] {"bir", "for"})); + updateAndCheckSource(6, 7, fields("bar", new String[] {"bir", "for", "far"})); assertEquals(5, totalNoopUpdates()); } @@ -94,42 +95,42 @@ public void testMap() throws Exception { String key1 = 1 + randomAlphaOfLength(3); String key2 = 2 + randomAlphaOfLength(3); String key3 = 3 + randomAlphaOfLength(3); - updateAndCheckSource(1, XContentFactory.jsonBuilder().startObject() + updateAndCheckSource(0, 1, XContentFactory.jsonBuilder().startObject() .startObject("test") .field(key1, "foo") .field(key2, "baz") .endObject().endObject()); - updateAndCheckSource(1, XContentFactory.jsonBuilder().startObject() + updateAndCheckSource(0, 1, XContentFactory.jsonBuilder().startObject() .startObject("test") .field(key1, "foo") .field(key2, "baz") .endObject().endObject()); - updateAndCheckSource(2, XContentFactory.jsonBuilder().startObject() + 
updateAndCheckSource(1, 2, XContentFactory.jsonBuilder().startObject() .startObject("test") .field(key1, "foo") .field(key2, "bir") .endObject().endObject()); - updateAndCheckSource(2, XContentFactory.jsonBuilder().startObject() + updateAndCheckSource(1, 2, XContentFactory.jsonBuilder().startObject() .startObject("test") .field(key1, "foo") .field(key2, "bir") .endObject().endObject()); - updateAndCheckSource(3, XContentFactory.jsonBuilder().startObject() + updateAndCheckSource(2, 3, XContentFactory.jsonBuilder().startObject() .startObject("test") .field(key1, "foo") .field(key2, "foo") .endObject().endObject()); - updateAndCheckSource(4, XContentFactory.jsonBuilder().startObject() + updateAndCheckSource(3, 4, XContentFactory.jsonBuilder().startObject() .startObject("test") .field(key1, "foo") .field(key2, (Object) null) .endObject().endObject()); - updateAndCheckSource(4, XContentFactory.jsonBuilder().startObject() + updateAndCheckSource(3, 4, XContentFactory.jsonBuilder().startObject() .startObject("test") .field(key1, "foo") .field(key2, (Object) null) .endObject().endObject()); - updateAndCheckSource(5, XContentFactory.jsonBuilder().startObject() + updateAndCheckSource(4, 5, XContentFactory.jsonBuilder().startObject() .startObject("test") .field(key1, "foo") .field(key2, (Object) null) @@ -140,63 +141,63 @@ public void testMap() throws Exception { } public void testMapAndField() throws Exception { - updateAndCheckSource(1, XContentFactory.jsonBuilder().startObject() + updateAndCheckSource(0, 1, XContentFactory.jsonBuilder().startObject() .field("f", "foo") .startObject("m") .field("mf1", "foo") .field("mf2", "baz") .endObject() .endObject()); - updateAndCheckSource(1, XContentFactory.jsonBuilder().startObject() + updateAndCheckSource(0, 1, XContentFactory.jsonBuilder().startObject() .field("f", "foo") .startObject("m") .field("mf1", "foo") .field("mf2", "baz") .endObject() .endObject()); - updateAndCheckSource(2, XContentFactory.jsonBuilder().startObject() + updateAndCheckSource(1, 2, XContentFactory.jsonBuilder().startObject() .field("f", "foo") .startObject("m") .field("mf1", "foo") .field("mf2", "bir") .endObject() .endObject()); - updateAndCheckSource(2, XContentFactory.jsonBuilder().startObject() + updateAndCheckSource(1, 2, XContentFactory.jsonBuilder().startObject() .field("f", "foo") .startObject("m") .field("mf1", "foo") .field("mf2", "bir") .endObject() .endObject()); - updateAndCheckSource(3, XContentFactory.jsonBuilder().startObject() + updateAndCheckSource(2, 3, XContentFactory.jsonBuilder().startObject() .field("f", "foo") .startObject("m") .field("mf1", "foo") .field("mf2", "foo") .endObject() .endObject()); - updateAndCheckSource(4, XContentFactory.jsonBuilder().startObject() + updateAndCheckSource(3, 4, XContentFactory.jsonBuilder().startObject() .field("f", "bar") .startObject("m") .field("mf1", "foo") .field("mf2", "foo") .endObject() .endObject()); - updateAndCheckSource(4, XContentFactory.jsonBuilder().startObject() + updateAndCheckSource(3, 4, XContentFactory.jsonBuilder().startObject() .field("f", "bar") .startObject("m") .field("mf1", "foo") .field("mf2", "foo") .endObject() .endObject()); - updateAndCheckSource(5, XContentFactory.jsonBuilder().startObject() + updateAndCheckSource(4, 5, XContentFactory.jsonBuilder().startObject() .field("f", "baz") .startObject("m") .field("mf1", "foo") .field("mf2", "foo") .endObject() .endObject()); - updateAndCheckSource(6, XContentFactory.jsonBuilder().startObject() + updateAndCheckSource(5, 6, 
XContentFactory.jsonBuilder().startObject() .field("f", "bop") .startObject("m") .field("mf1", "foo") @@ -212,16 +213,16 @@ public void testMapAndField() throws Exception { * its true by default. */ public void testTotallyEmpty() throws Exception { - updateAndCheckSource(1, XContentFactory.jsonBuilder().startObject() + updateAndCheckSource(0, 1, XContentFactory.jsonBuilder().startObject() .field("f", "foo") .startObject("m") .field("mf1", "foo") .field("mf2", "baz") .endObject() .endObject()); - update(true, 1, XContentFactory.jsonBuilder().startObject().endObject()); - update(false, 2, XContentFactory.jsonBuilder().startObject().endObject()); - update(null, 2, XContentFactory.jsonBuilder().startObject().endObject()); + update(true, 0, 1, XContentFactory.jsonBuilder().startObject().endObject()); + update(false, 1, 2, XContentFactory.jsonBuilder().startObject().endObject()); + update(null, 1, 2, XContentFactory.jsonBuilder().startObject().endObject()); } private XContentBuilder fields(Object... fields) throws IOException { @@ -235,16 +236,16 @@ private XContentBuilder fields(Object... fields) throws IOException { return builder; } - private void updateAndCheckSource(long expectedVersion, XContentBuilder xContentBuilder) { - updateAndCheckSource(expectedVersion, true, xContentBuilder); + private void updateAndCheckSource(long expectedSeqNo, long expectedVersion, XContentBuilder xContentBuilder) { + updateAndCheckSource(expectedSeqNo, expectedVersion, true, xContentBuilder); } - private void updateAndCheckSource(long expectedVersion, Boolean detectNoop, XContentBuilder xContentBuilder) { - UpdateResponse updateResponse = update(detectNoop, expectedVersion, xContentBuilder); + private void updateAndCheckSource(long expectedSeqNo, long expectedVersion, Boolean detectNoop, XContentBuilder xContentBuilder) { + UpdateResponse updateResponse = update(detectNoop, expectedSeqNo, expectedVersion, xContentBuilder); assertEquals(updateResponse.getGetResult().sourceRef().utf8ToString(), BytesReference.bytes(xContentBuilder).utf8ToString()); } - private UpdateResponse update(Boolean detectNoop, long expectedVersion, XContentBuilder xContentBuilder) { + private UpdateResponse update(Boolean detectNoop, long expectedSeqNo, long expectedVersion, XContentBuilder xContentBuilder) { UpdateRequestBuilder updateRequest = client().prepareUpdate("test", "type1", "1") .setDoc(xContentBuilder) .setDocAsUpsert(true) @@ -254,7 +255,8 @@ private UpdateResponse update(Boolean detectNoop, long expectedVersion, XContent } UpdateResponse updateResponse = updateRequest.get(); assertThat(updateResponse.getGetResult(), notNullValue()); - assertEquals(expectedVersion, updateResponse.getVersion()); + assertThat(updateResponse.getSeqNo(), equalTo(expectedSeqNo)); + assertThat(updateResponse.getVersion(), equalTo(expectedVersion)); return updateResponse; } diff --git a/server/src/test/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java b/server/src/test/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java index 62048119b6974..6074bde2684b7 100644 --- a/server/src/test/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java +++ b/server/src/test/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java @@ -38,6 +38,8 @@ import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.disruption.ServiceDisruptionScheme; +import org.elasticsearch.threadpool.Scheduler; +import 
org.elasticsearch.threadpool.ThreadPool; import java.io.FileInputStream; import java.io.IOException; @@ -50,8 +52,10 @@ import java.util.Random; import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import java.util.function.Function; @@ -437,13 +441,24 @@ private void consumeOutput(HistoryOutput output, int eventId) { } } - public boolean isLinearizable() { + public void assertLinearizable() { logger.info("--> Linearizability checking history of size: {} for key: {} and initialVersion: {}: {}", history.size(), id, initialVersion, history); LinearizabilityChecker.SequentialSpec spec = new CASSequentialSpec(initialVersion); boolean linearizable = false; try { - linearizable = new LinearizabilityChecker().isLinearizable(spec, history, missingResponseGenerator()); + final ScheduledThreadPoolExecutor scheduler = Scheduler.initScheduler(Settings.EMPTY); + final AtomicBoolean abort = new AtomicBoolean(); + // Large histories can be problematic and can make the linearizability checker run out of memory. + // Bound how long the checker may run on such histories (values determined empirically). + if (history.size() > 300) { + scheduler.schedule(() -> abort.set(true), 10, TimeUnit.SECONDS); + } + linearizable = new LinearizabilityChecker().isLinearizable(spec, history, missingResponseGenerator(), abort::get); + ThreadPool.terminate(scheduler, 1, TimeUnit.SECONDS); + if (abort.get() && linearizable == false) { + linearizable = true; // the check was aborted by the time bound, so let the test pass + } } finally { // implicitly test that we can serialize all histories.
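// The serialized form is also logged below, so a problematic history can be inspected after the fact.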
String serializedHistory = base64Serialize(history); @@ -453,11 +468,7 @@ public boolean isLinearizable() { spec, initialVersion, serializedHistory); } } - return linearizable; - } - - public void assertLinearizable() { - assertTrue("Must be linearizable", isLinearizable()); + assertTrue("Must be linearizable", linearizable); } } diff --git a/server/src/test/resources/org/elasticsearch/index/checkpoint/v2.ckp.binary b/server/src/test/resources/org/elasticsearch/index/checkpoint/v2.ckp.binary new file mode 100644 index 0000000000000..91377cef9d66d Binary files /dev/null and b/server/src/test/resources/org/elasticsearch/index/checkpoint/v2.ckp.binary differ diff --git a/test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java b/test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java index b060d78b92b69..3bd6233225976 100644 --- a/test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java +++ b/test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java @@ -109,8 +109,8 @@ public static void main(String[] args) throws Exception { String haNameService = System.getProperty("ha-nameservice"); boolean haEnabled = haNameService != null; if (haEnabled) { - MiniDFSNNTopology.NNConf nn1 = new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10001); - MiniDFSNNTopology.NNConf nn2 = new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10002); + MiniDFSNNTopology.NNConf nn1 = new MiniDFSNNTopology.NNConf("nn1").setIpcPort(0); + MiniDFSNNTopology.NNConf nn2 = new MiniDFSNNTopology.NNConf("nn2").setIpcPort(0); MiniDFSNNTopology.NSConf nameservice = new MiniDFSNNTopology.NSConf(haNameService).addNN(nn1).addNN(nn2); MiniDFSNNTopology namenodeTopology = new MiniDFSNNTopology().addNameservice(nameservice); builder.nnTopology(namenodeTopology); diff --git a/test/framework/src/main/java/org/elasticsearch/action/support/ActionTestUtils.java b/test/framework/src/main/java/org/elasticsearch/action/support/ActionTestUtils.java index 55c1acabd3fbd..98d92ae4bd75d 100644 --- a/test/framework/src/main/java/org/elasticsearch/action/support/ActionTestUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/action/support/ActionTestUtils.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.tasks.Task; import static org.elasticsearch.action.support.PlainActionFuture.newFuture; @@ -37,6 +38,16 @@ Response executeBlocking(TransportAction action, Request requ return future.actionGet(); } + /** + * Executes the given action. + * + * This is a shim method to make execution publicly available in tests. 
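+ * + * A minimal illustrative use, assuming a {@code PlainActionFuture} serves as the listener (as in + * {@code executeBlocking} above) and that a {@code null} task is acceptable to the action under test: + * <pre> + * PlainActionFuture&lt;Response&gt; future = newFuture(); + * ActionTestUtils.execute(action, null, request, future); + * Response response = future.actionGet(); + * </pre>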
+ */ + public static + void execute(TransportAction action, Task task, Request request, ActionListener listener) { + action.execute(task, request, listener); + } + public static ActionListener assertNoFailureListener(CheckedConsumer consumer) { return ActionListener.wrap(consumer, e -> { throw new AssertionError(e); diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java index 9be598bb384bd..2c2a63d65eac2 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java @@ -51,7 +51,6 @@ import static java.util.Collections.emptyMap; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; -import static org.elasticsearch.common.util.CollectionUtils.arrayAsArrayList; public abstract class ESAllocationTestCase extends ESTestCase { private static final ClusterSettings EMPTY_CLUSTER_SETTINGS = @@ -122,8 +121,7 @@ protected static ClusterState startRandomInitializingShard(ClusterState cluster if (initializingShards.isEmpty()) { return clusterState; } - return strategy.applyStartedShards(clusterState, - arrayAsArrayList(initializingShards.get(randomInt(initializingShards.size() - 1)))); + return startShardsAndReroute(strategy, clusterState, randomFrom(initializingShards)); } protected static AllocationDeciders yesAllocationDeciders() { @@ -149,11 +147,65 @@ protected ClusterState applyStartedShardsUntilNoChange(ClusterState clusterState do { lastClusterState = clusterState; logger.debug("ClusterState: {}", clusterState.getRoutingNodes()); - clusterState = service.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + clusterState = startInitializingShardsAndReroute(service, clusterState); } while (lastClusterState.equals(clusterState) == false); return clusterState; } + /** + * Mark all initializing shards as started, then perform a reroute (which may start some other shards initializing). + * + * @return the cluster state after completing the reroute. + */ + public static ClusterState startInitializingShardsAndReroute(AllocationService allocationService, ClusterState clusterState) { + return startShardsAndReroute(allocationService, clusterState, clusterState.routingTable().shardsWithState(INITIALIZING)); + } + + /** + * Mark all initializing shards on the given node as started, then perform a reroute (which may start some other shards initializing). + * + * @return the cluster state after completing the reroute. + */ + public static ClusterState startInitializingShardsAndReroute(AllocationService allocationService, + ClusterState clusterState, + RoutingNode routingNode) { + return startShardsAndReroute(allocationService, clusterState, routingNode.shardsWithState(INITIALIZING)); + } + + /** + * Mark all initializing shards for the given index as started, then perform a reroute (which may start some other shards initializing). + * + * @return the cluster state after completing the reroute. + */ + public static ClusterState startInitializingShardsAndReroute(AllocationService allocationService, + ClusterState clusterState, + String index) { + return startShardsAndReroute(allocationService, clusterState, + clusterState.routingTable().index(index).shardsWithState(INITIALIZING)); + } + + /** + * Mark the given shards as started, then perform a reroute (which may start some other shards initializing). 
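+ * + * Illustrative usage (names assumed from a typical allocation test): + * {@code clusterState = startShardsAndReroute(allocationService, clusterState, shardRouting);}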
+ * + * @return the cluster state after completing the reroute. + */ + public static ClusterState startShardsAndReroute(AllocationService allocationService, + ClusterState clusterState, + ShardRouting... initializingShards) { + return startShardsAndReroute(allocationService, clusterState, Arrays.asList(initializingShards)); + } + + /** + * Mark the given shards as started, then perform a reroute (which may start some other shards initializing). + * + * @return the cluster state after completing the reroute. + */ + public static ClusterState startShardsAndReroute(AllocationService allocationService, + ClusterState clusterState, + List initializingShards) { + return allocationService.reroute(allocationService.applyStartedShards(clusterState, initializingShards), "reroute after starting"); + } + public static class TestAllocateDecision extends AllocationDecider { private final Decision decision; diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java index d2c950722c568..a3dce45daf101 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java @@ -64,6 +64,7 @@ import org.elasticsearch.gateway.MetaStateService; import org.elasticsearch.gateway.MockGatewayMetaState; import org.elasticsearch.indices.cluster.FakeThreadPoolMasterService; +import org.elasticsearch.node.Node; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.disruption.DisruptableMockTransport; import org.elasticsearch.test.disruption.DisruptableMockTransport.ConnectionStatus; @@ -99,6 +100,7 @@ import static java.util.Collections.emptyList; import static java.util.Collections.emptySet; +import static java.util.Collections.singleton; import static org.elasticsearch.cluster.coordination.AbstractCoordinatorTestCase.Cluster.DEFAULT_DELAY_VARIABILITY; import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.BOOTSTRAP_PLACEHOLDER_PREFIX; import static org.elasticsearch.cluster.coordination.CoordinationStateTestCluster.clusterState; @@ -503,7 +505,8 @@ void stabilise(long stabilisationDurationMillis) { if (isConnectedPair(leader, clusterNode)) { assertThat(nodeId + " is a follower of " + leaderId, clusterNode.coordinator.getMode(), is(FOLLOWER)); assertThat(nodeId + " has the same term as " + leaderId, clusterNode.coordinator.getCurrentTerm(), is(leaderTerm)); - assertTrue(nodeId + " has voted for " + leaderId, leader.coordinator.hasJoinVoteFrom(clusterNode.getLocalNode())); + assertFalse(nodeId + " is not a missing vote for " + leaderId, + leader.coordinator.missingJoinVoteFrom(clusterNode.getLocalNode())); assertThat(nodeId + " has the same accepted state as " + leaderId, clusterNode.coordinator.getLastAcceptedState().getVersion(), isEqualToLeaderVersion); if (clusterNode.getClusterStateApplyResponse() == ClusterStateApplyResponse.SUCCEED) { @@ -723,18 +726,59 @@ class MockPersistedState implements CoordinationState.PersistedState { nodeEnvironment = null; BytesStreamOutput outStream = new BytesStreamOutput(); outStream.setVersion(Version.CURRENT); - final MetaData updatedMetaData = adaptGlobalMetaData.apply(oldState.getLastAcceptedState().metaData()); - final ClusterState clusterState; - if (updatedMetaData != oldState.getLastAcceptedState().metaData()) { - clusterState = 
ClusterState.builder(oldState.getLastAcceptedState()).metaData(updatedMetaData).build(); + + final long persistedCurrentTerm; + + if ( // node is master-ineligible either before or after the restart ... + (oldState.getLastAcceptedState().nodes().getLocalNode().isMasterNode() && newLocalNode.isMasterNode()) == false + // ... and it's accepted some non-initial state so we can roll back ... + && (oldState.getLastAcceptedState().term() > 0L || oldState.getLastAcceptedState().version() > 0L) + // ... and we're feeling lucky ... + && randomBoolean()) { + + // ... then we might not have reliably persisted the cluster state, so emulate a rollback + + persistedCurrentTerm = randomLongBetween(0L, oldState.getCurrentTerm()); + final long lastAcceptedTerm = oldState.getLastAcceptedState().term(); + final long lastAcceptedVersion = oldState.getLastAcceptedState().version(); + + final long newLastAcceptedTerm; + final long newLastAcceptedVersion; + + if (lastAcceptedVersion == 0L) { + newLastAcceptedTerm = randomLongBetween(0L, Math.min(persistedCurrentTerm, lastAcceptedTerm - 1)); + newLastAcceptedVersion = randomNonNegativeLong(); + } else { + newLastAcceptedTerm = randomLongBetween(0L, Math.min(persistedCurrentTerm, lastAcceptedTerm)); + newLastAcceptedVersion = randomLongBetween(0L, + newLastAcceptedTerm == lastAcceptedTerm ? lastAcceptedVersion - 1 : Long.MAX_VALUE); + } + final VotingConfiguration newVotingConfiguration + = new VotingConfiguration(randomBoolean() ? emptySet() : singleton(randomAlphaOfLength(10))); + final long newValue = randomLong(); + + logger.trace("rolling back persisted cluster state on master-ineligible node [{}]: " + + "previously currentTerm={}, lastAcceptedTerm={}, lastAcceptedVersion={} " + + "but now currentTerm={}, lastAcceptedTerm={}, lastAcceptedVersion={}", newLocalNode, + oldState.getCurrentTerm(), lastAcceptedTerm, lastAcceptedVersion, + persistedCurrentTerm, newLastAcceptedTerm, newLastAcceptedVersion); + + clusterState(newLastAcceptedTerm, newLastAcceptedVersion, newLocalNode, newVotingConfiguration, + newVotingConfiguration, newValue).writeTo(outStream); } else { - clusterState = oldState.getLastAcceptedState(); + persistedCurrentTerm = oldState.getCurrentTerm(); + final MetaData updatedMetaData = adaptGlobalMetaData.apply(oldState.getLastAcceptedState().metaData()); + if (updatedMetaData != oldState.getLastAcceptedState().metaData()) { + ClusterState.builder(oldState.getLastAcceptedState()).metaData(updatedMetaData).build().writeTo(outStream); + } else { + oldState.getLastAcceptedState().writeTo(outStream); + } } - clusterState.writeTo(outStream); + StreamInput inStream = new NamedWriteableAwareStreamInput(outStream.bytes().streamInput(), new NamedWriteableRegistry(ClusterModule.getNamedWriteables())); // adapt cluster state to new localNode instance and add blocks - delegate = new InMemoryPersistedState(adaptCurrentTerm.apply(oldState.getCurrentTerm()), + delegate = new InMemoryPersistedState(adaptCurrentTerm.apply(persistedCurrentTerm), ClusterStateUpdaters.addStateNotRecoveredBlock(ClusterState.readFrom(inStream, newLocalNode))); } } catch (IOException e) { @@ -843,11 +887,11 @@ protected Optional getDisruptableMockTransport(Transpo final AllocationService allocationService = ESAllocationTestCase.createAllocationService(Settings.EMPTY); coordinator = new Coordinator("test_node", settings, clusterSettings, transportService, writableRegistry(), allocationService, masterService, this::getPersistedState, - Cluster.this::provideSeedHosts, clusterApplierService, 
onJoinValidators, Randomness.get(), (s, r) -> {}, + Cluster.this::provideSeedHosts, clusterApplierService, onJoinValidators, Randomness.get(), (s, p, r) -> {}, getElectionStrategy()); masterService.setClusterStatePublisher(coordinator); final GatewayService gatewayService = new GatewayService(settings, allocationService, clusterService, - deterministicTaskQueue.getThreadPool(this::onNode), null, coordinator); + deterministicTaskQueue.getThreadPool(this::onNode), coordinator, null); logger.trace("starting up [{}]", localNode); transportService.start(); @@ -880,7 +924,8 @@ ClusterNode restartedNode(Function adaptGlobalMetaData, Func final DiscoveryNode newLocalNode = new DiscoveryNode(localNode.getName(), localNode.getId(), UUIDs.randomBase64UUID(random()), // generated deterministically for repeatable tests address.address().getHostString(), address.getAddress(), address, Collections.emptyMap(), - localNode.isMasterNode() ? DiscoveryNodeRole.BUILT_IN_ROLES : emptySet(), Version.CURRENT); + localNode.isMasterNode() && Node.NODE_MASTER_SETTING.get(nodeSettings) + ? DiscoveryNodeRole.BUILT_IN_ROLES : emptySet(), Version.CURRENT); return new ClusterNode(nodeIndex, newLocalNode, node -> new MockPersistedState(newLocalNode, persistedState, adaptGlobalMetaData, adaptCurrentTerm), nodeSettings); } diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/CoordinationStateTestCluster.java b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/CoordinationStateTestCluster.java index 582ecd1fcface..69e2ba4113cb1 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/CoordinationStateTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/CoordinationStateTestCluster.java @@ -23,11 +23,13 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; import java.util.ArrayList; +import java.util.HashSet; import java.util.List; import java.util.Optional; import java.util.Set; @@ -36,6 +38,7 @@ import static com.carrotsearch.randomizedtesting.RandomizedTest.rarely; import static java.util.stream.Collectors.toSet; import static org.apache.lucene.util.LuceneTestCase.random; +import static org.elasticsearch.test.ESTestCase.randomBoolean; import static org.elasticsearch.test.ESTestCase.randomFrom; import static org.elasticsearch.test.ESTestCase.randomIntBetween; import static org.elasticsearch.test.ESTestCase.randomLong; @@ -86,10 +89,10 @@ public static long value(ClusterState clusterState) { } static class ClusterNode { - - final DiscoveryNode localNode; - final CoordinationState.PersistedState persistedState; private final ElectionStrategy electionStrategy; + + DiscoveryNode localNode; + CoordinationState.PersistedState persistedState; CoordinationState state; ClusterNode(DiscoveryNode localNode, ElectionStrategy electionStrategy) { @@ -102,6 +105,26 @@ static class ClusterNode { } void reboot() { + if (localNode.isMasterNode() == false && rarely()) { + // master-ineligible nodes can't be trusted to persist the cluster state properly + persistedState = new InMemoryPersistedState(0L, + clusterState(0L, 0L, localNode, CoordinationMetaData.VotingConfiguration.EMPTY_CONFIG, + 
CoordinationMetaData.VotingConfiguration.EMPTY_CONFIG, 0L)); + } + + final Set roles = new HashSet<>(localNode.getRoles()); + if (randomBoolean()) { + if (roles.contains(DiscoveryNodeRole.MASTER_ROLE)) { + roles.remove(DiscoveryNodeRole.MASTER_ROLE); + } else { + roles.add(DiscoveryNodeRole.MASTER_ROLE); + } + } + + localNode = new DiscoveryNode(localNode.getName(), localNode.getId(), UUIDs.randomBase64UUID(random()), + localNode.getHostName(), localNode.getHostAddress(), localNode.getAddress(), localNode.getAttributes(), + roles, localNode.getVersion()); + state = new CoordinationState(localNode, persistedState, electionStrategy); } diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/LinearizabilityChecker.java b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/LinearizabilityChecker.java index a854d40355739..0abbcf863ca71 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/LinearizabilityChecker.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/LinearizabilityChecker.java @@ -39,6 +39,7 @@ import java.util.Set; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.BooleanSupplier; import java.util.function.Consumer; import java.util.function.Function; @@ -227,13 +228,27 @@ public String toString() { * @return true iff the history is linearizable w.r.t. the given spec */ public boolean isLinearizable(SequentialSpec spec, History history, Function missingResponseGenerator) { + return isLinearizable(spec, history, missingResponseGenerator, () -> false); + } + + /** + * Checks whether the provided history is linearizable with respect to the given sequential specification + * + * @param spec the sequential specification of the datatype + * @param history the history of events to check for linearizability + * @param missingResponseGenerator used to complete the history with missing responses + * @param terminateEarly a condition upon which to terminate early + * @return true iff the history is linearizable w.r.t. 
the given spec + */ + public boolean isLinearizable(SequentialSpec spec, History history, Function missingResponseGenerator, + BooleanSupplier terminateEarly) { history = history.clone(); // clone history before completing it history.complete(missingResponseGenerator); // complete history final Collection> partitions = spec.partition(history.copyEvents()); - return partitions.stream().allMatch(h -> isLinearizable(spec, h)); + return partitions.stream().allMatch(h -> isLinearizable(spec, h, terminateEarly)); } - private boolean isLinearizable(SequentialSpec spec, List history) { + private boolean isLinearizable(SequentialSpec spec, List history, BooleanSupplier terminateEarly) { logger.debug("Checking history of size: {}: {}", history.size(), history); Object state = spec.initialState(); // the current state of the datatype final FixedBitSet linearized = new FixedBitSet(history.size() / 2); // the linearized prefix of the history @@ -245,6 +260,9 @@ private boolean isLinearizable(SequentialSpec spec, List history) { Entry entry = headEntry.next; // current entry while (headEntry.next != null) { + if (terminateEarly.getAsBoolean()) { + return false; + } if (entry.match != null) { final Optional maybeNextState = spec.nextState(state, entry.event.value, entry.match.event.value); boolean shouldExploreNextState = false; diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogLine.java b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogLine.java index fa8f3d7d27018..15f4b17ba6f8c 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogLine.java +++ b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogLine.java @@ -30,7 +30,7 @@ * Parsing log lines with this class confirms the json format of logs */ public class JsonLogLine { - public static final ObjectParser PARSER = createParser(false); + public static final ObjectParser PARSER = createParser(true); private String type; private String timestamp; diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsStream.java b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsStream.java index 28ad649f55a79..ad6fedca2d928 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsStream.java +++ b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsStream.java @@ -30,6 +30,8 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.Map; import java.util.Spliterator; import java.util.Spliterators; import java.util.stream.Stream; @@ -57,10 +59,20 @@ public static Stream from(Path path) throws IOException { return from(Files.newBufferedReader(path)); } + public static Stream> mapStreamFrom(Path path) throws IOException { + return new JsonLogsStream(Files.newBufferedReader(path)).streamMap(); + } + private Stream stream() { Spliterator spliterator = Spliterators.spliteratorUnknownSize(new JsonIterator(), Spliterator.ORDERED); return StreamSupport.stream(spliterator, false) - .onClose(this::close); + .onClose(this::close); + } + + private Stream> streamMap() { + Spliterator> spliterator = Spliterators.spliteratorUnknownSize(new MapIterator(), Spliterator.ORDERED); + return StreamSupport.stream(spliterator, false) + .onClose(this::close); } private void close() { @@ -72,6 +84,26 @@ private void close() { } } + private class MapIterator implements Iterator> { + + @Override + public boolean hasNext() { + return 
parser.isClosed() == false; + } + + @Override + public Map next() { + Map map; + try { + map = parser.map(LinkedHashMap::new, XContentParser::text); + parser.nextToken(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + return map; + } + } + private class JsonIterator implements Iterator { @Override diff --git a/test/framework/src/main/java/org/elasticsearch/geo/GeometryTestUtils.java b/test/framework/src/main/java/org/elasticsearch/geo/GeometryTestUtils.java new file mode 100644 index 0000000000000..626f9b618d7b7 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/geo/GeometryTestUtils.java @@ -0,0 +1,179 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.geo; + +import org.apache.lucene.geo.GeoTestUtil; +import org.elasticsearch.geo.geometry.Circle; +import org.elasticsearch.geo.geometry.Geometry; +import org.elasticsearch.geo.geometry.GeometryCollection; +import org.elasticsearch.geo.geometry.Line; +import org.elasticsearch.geo.geometry.LinearRing; +import org.elasticsearch.geo.geometry.MultiLine; +import org.elasticsearch.geo.geometry.MultiPoint; +import org.elasticsearch.geo.geometry.MultiPolygon; +import org.elasticsearch.geo.geometry.Point; +import org.elasticsearch.geo.geometry.Polygon; +import org.elasticsearch.geo.geometry.Rectangle; +import org.elasticsearch.test.ESTestCase; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Function; + +public class GeometryTestUtils { + + public static double randomLat() { + return GeoTestUtil.nextLatitude(); + } + + public static double randomLon() { + return GeoTestUtil.nextLongitude(); + } + + public static double randomAlt() { + return ESTestCase.randomDouble(); + } + + public static Circle randomCircle(boolean hasAlt) { + if (hasAlt) { + return new Circle(randomLat(), randomLon(), ESTestCase.randomDouble(), + ESTestCase.randomDoubleBetween(0, 100, false)); + } else { + return new Circle(randomLat(), randomLon(), ESTestCase.randomDoubleBetween(0, 100, false)); + } + } + + public static Line randomLine(boolean hasAlts) { + int size = ESTestCase.randomIntBetween(2, 10); + double[] lats = new double[size]; + double[] lons = new double[size]; + double[] alts = hasAlts ? 
new double[size] : null; + for (int i = 0; i < size; i++) { + lats[i] = randomLat(); + lons[i] = randomLon(); + if (hasAlts) { + alts[i] = randomAlt(); + } + } + if (hasAlts) { + return new Line(lats, lons, alts); + } + return new Line(lats, lons); + } + + public static Point randomPoint() { + return randomPoint(ESTestCase.randomBoolean()); + } + + public static Point randomPoint(boolean hasAlt) { + if (hasAlt) { + return new Point(randomLat(), randomLon(), randomAlt()); + } else { + return new Point(randomLat(), randomLon()); + } + } + + public static Polygon randomPolygon(boolean hasAlt) { + org.apache.lucene.geo.Polygon lucenePolygon = GeoTestUtil.nextPolygon(); + if (lucenePolygon.numHoles() > 0) { + org.apache.lucene.geo.Polygon[] luceneHoles = lucenePolygon.getHoles(); + List holes = new ArrayList<>(); + for (int i = 0; i < lucenePolygon.numHoles(); i++) { + holes.add(linearRing(luceneHoles[i], hasAlt)); + } + return new Polygon(linearRing(lucenePolygon, hasAlt), holes); + } + return new Polygon(linearRing(lucenePolygon, hasAlt)); + } + + + private static double[] randomAltRing(int size) { + double[] alts = new double[size]; + for (int i = 0; i < size - 1; i++) { + alts[i] = randomAlt(); + } + alts[size - 1] = alts[0]; + return alts; + } + + private static LinearRing linearRing(org.apache.lucene.geo.Polygon polygon, boolean generateAlts) { + if (generateAlts) { + return new LinearRing(polygon.getPolyLats(), polygon.getPolyLons(), randomAltRing(polygon.numPoints())); + } else { + return new LinearRing(polygon.getPolyLats(), polygon.getPolyLons()); + } + } + + public static Rectangle randomRectangle() { + org.apache.lucene.geo.Rectangle rectangle = GeoTestUtil.nextBox(); + return new Rectangle(rectangle.minLat, rectangle.maxLat, rectangle.minLon, rectangle.maxLon); + } + + public static MultiPoint randomMultiPoint(boolean hasAlt) { + int size = ESTestCase.randomIntBetween(3, 10); + List points = new ArrayList<>(); + for (int i = 0; i < size; i++) { + points.add(randomPoint(hasAlt)); + } + return new MultiPoint(points); + } + + public static MultiLine randomMultiLine(boolean hasAlt) { + int size = ESTestCase.randomIntBetween(3, 10); + List lines = new ArrayList<>(); + for (int i = 0; i < size; i++) { + lines.add(randomLine(hasAlt)); + } + return new MultiLine(lines); + } + + public static MultiPolygon randomMultiPolygon(boolean hasAlt) { + int size = ESTestCase.randomIntBetween(3, 10); + List polygons = new ArrayList<>(); + for (int i = 0; i < size; i++) { + polygons.add(randomPolygon(hasAlt)); + } + return new MultiPolygon(polygons); + } + + public static GeometryCollection randomGeometryCollection(boolean hasAlt) { + return randomGeometryCollection(0, hasAlt); + } + + private static GeometryCollection randomGeometryCollection(int level, boolean hasAlt) { + int size = ESTestCase.randomIntBetween(1, 10); + List shapes = new ArrayList<>(); + for (int i = 0; i < size; i++) { + @SuppressWarnings("unchecked") Function geometry = ESTestCase.randomFrom( + GeometryTestUtils::randomCircle, + GeometryTestUtils::randomLine, + GeometryTestUtils::randomPoint, + GeometryTestUtils::randomPolygon, + GeometryTestUtils::randomMultiLine, + GeometryTestUtils::randomMultiPoint, + GeometryTestUtils::randomMultiPolygon, + hasAlt ? GeometryTestUtils::randomPoint : (b) -> randomRectangle(), + level < 3 ? 
(b) -> randomGeometryCollection(level + 1, b) : GeometryTestUtils::randomPoint // don't build too deep + ); + shapes.add(geometry.apply(hasAlt)); + } + return new GeometryCollection<>(shapes); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 47a8f73ef62c9..e36f5e3999076 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -635,7 +635,7 @@ protected final void recoverUnstartedReplica(final IndexShard replica, final StartRecoveryRequest request = new StartRecoveryRequest(replica.shardId(), targetAllocationId, pNode, rNode, snapshot, replica.routingEntry().primary(), 0, startingSeqNo); final RecoverySourceHandler recovery = new RecoverySourceHandler(primary, - new AsyncRecoveryTarget(recoveryTarget, threadPool.generic()), + new AsyncRecoveryTarget(recoveryTarget, threadPool.generic()), threadPool, request, Math.toIntExact(ByteSizeUnit.MB.toBytes(1)), between(1, 8)); primary.updateShardState(primary.routingEntry(), primary.getPendingPrimaryTerm(), null, currentClusterStateVersion.incrementAndGet(), inSyncIds, routingTable); diff --git a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java index 86b6014b87f87..304a3963ff4e0 100644 --- a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java @@ -36,6 +36,7 @@ import java.util.TreeSet; import java.util.regex.Matcher; import java.util.regex.Pattern; +import java.util.stream.Collectors; import static java.util.Collections.emptyMap; import static java.util.Map.entry; @@ -273,19 +274,25 @@ public Map> getPreConfiguredCharFilters() { } public void testTokenizers() { - Set missing = new TreeSet(org.apache.lucene.analysis.util.TokenizerFactory.availableTokenizers()); + Set missing = new TreeSet(); + missing.addAll(org.apache.lucene.analysis.util.TokenizerFactory.availableTokenizers() + .stream().map(key -> key.toLowerCase(Locale.ROOT)).collect(Collectors.toSet())); missing.removeAll(getTokenizers().keySet()); assertTrue("new tokenizers found, please update KNOWN_TOKENIZERS: " + missing.toString(), missing.isEmpty()); } public void testCharFilters() { - Set missing = new TreeSet(org.apache.lucene.analysis.util.CharFilterFactory.availableCharFilters()); + Set missing = new TreeSet(); + missing.addAll(org.apache.lucene.analysis.util.CharFilterFactory.availableCharFilters() + .stream().map(key -> key.toLowerCase(Locale.ROOT)).collect(Collectors.toSet())); missing.removeAll(getCharFilters().keySet()); assertTrue("new charfilters found, please update KNOWN_CHARFILTERS: " + missing.toString(), missing.isEmpty()); } public void testTokenFilters() { - Set missing = new TreeSet(org.apache.lucene.analysis.util.TokenFilterFactory.availableTokenFilters()); + Set missing = new TreeSet(); + missing.addAll(org.apache.lucene.analysis.util.TokenFilterFactory.availableTokenFilters() + .stream().map(key -> key.toLowerCase(Locale.ROOT)).collect(Collectors.toSet())); missing.removeAll(getTokenFilters().keySet()); assertTrue("new tokenfilters found, please update KNOWN_TOKENFILTERS: " + missing.toString(), missing.isEmpty()); } diff 
--git a/test/framework/src/main/java/org/elasticsearch/indices/recovery/AsyncRecoveryTarget.java b/test/framework/src/main/java/org/elasticsearch/indices/recovery/AsyncRecoveryTarget.java index afd2aa4e85888..1f4198d00a433 100644 --- a/test/framework/src/main/java/org/elasticsearch/indices/recovery/AsyncRecoveryTarget.java +++ b/test/framework/src/main/java/org/elasticsearch/indices/recovery/AsyncRecoveryTarget.java @@ -83,7 +83,6 @@ public void cleanFiles(int totalTranslogOps, long globalCheckpoint, Store.Metada @Override public void writeFileChunk(StoreFileMetaData fileMetaData, long position, BytesReference content, boolean lastChunk, int totalTranslogOps, ActionListener listener) { - // TODO: remove this clone once we send file chunk async final BytesReference copy = new BytesArray(BytesRef.deepCopyOf(content.toBytesRef())); executor.execute(() -> target.writeFileChunk(fileMetaData, position, copy, lastChunk, totalTranslogOps, listener)); } diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java index 2359a2c5d99f6..3fd67b99ee840 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java @@ -261,9 +261,9 @@ protected void doRun() throws Exception { final BlobStore blobStore = repo.blobStore(); future.onResponse( blobStore.blobContainer(BlobPath.cleanPath().add("indices")).children().containsKey("foo") - && blobStore.blobContainer(BlobPath.cleanPath().add("indices").add("foo")).blobExists("bar") - && blobStore.blobContainer(BlobPath.cleanPath()).blobExists("meta-foo.dat") - && blobStore.blobContainer(BlobPath.cleanPath()).blobExists("snap-foo.dat") + && BlobStoreTestUtil.blobExists(blobStore.blobContainer(BlobPath.cleanPath().add("indices").add("foo")), "bar") + && BlobStoreTestUtil.blobExists(blobStore.blobContainer(BlobPath.cleanPath()), "meta-foo.dat") + && BlobStoreTestUtil.blobExists(blobStore.blobContainer(BlobPath.cleanPath()), "snap-foo.dat") ); } }); diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreTestCase.java index a32d841927360..fe6f059fd38e3 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreTestCase.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -47,8 +48,8 @@ public void testContainerCreationAndDeletion() throws IOException { assertArrayEquals(readBlobFully(containerFoo, "test", data1.length), data1); assertArrayEquals(readBlobFully(containerBar, "test", data2.length), data2); - assertTrue(containerFoo.blobExists("test")); - assertTrue(containerBar.blobExists("test")); + assertTrue(BlobStoreTestUtil.blobExists(containerFoo, "test")); + assertTrue(BlobStoreTestUtil.blobExists(containerBar, "test")); } } diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreTestUtil.java 
b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreTestUtil.java index 27161d626ab9c..6bb640762f329 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreTestUtil.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreTestUtil.java @@ -22,6 +22,9 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobContainer; +import org.elasticsearch.common.blobstore.BlobMetaData; +import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -35,18 +38,25 @@ import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.threadpool.ThreadPool; +import java.io.ByteArrayInputStream; import java.io.DataInputStream; import java.io.IOException; import java.io.InputStream; +import java.nio.file.NoSuchFileException; import java.util.Collection; import java.util.Collections; import java.util.List; -import java.util.Locale; import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.Locale; import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; +import static org.elasticsearch.test.ESTestCase.randomIntBetween; import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.hasKey; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThat; @@ -60,6 +70,14 @@ public static void assertRepoConsistency(InternalTestCluster testCluster, String BlobStoreTestUtil.assertConsistency(repo, repo.threadPool().executor(ThreadPool.Names.GENERIC)); } + public static boolean blobExists(BlobContainer container, String blobName) throws IOException { + try (InputStream ignored = container.readBlob(blobName)) { + return true; + } catch (NoSuchFileException e) { + return false; + } + } + /** * Assert that there are no unreferenced indices or unreferenced root-level metadata blobs in any repository. 
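The blobExists helper introduced above checks for existence by opening the blob and treating NoSuchFileException as absence, so it exercises the plain readBlob path that every BlobContainer implementation already has to support; that is what lets the call sites in this change (AbstractThirdPartyRepositoryTestCase, ESBlobStoreTestCase, and ESBlobStoreRepositoryIntegTestCase) drop their dependence on BlobContainer#blobExists. A minimal usage sketch, with illustrative path and blob names:

    // Given a BlobStore blobStore from the repository under test; the path below
    // simply mirrors the "indices"/"foo"/"bar" layout used in the tests above.
    BlobContainer container = blobStore.blobContainer(BlobPath.cleanPath().add("indices").add("foo"));
    if (BlobStoreTestUtil.blobExists(container, "bar") == false) {
        throw new AssertionError("expected blob [bar] to exist");
    }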
* TODO: Expand the logic here to also check for unreferenced segment blobs and shard level metadata @@ -74,11 +92,11 @@ public static void assertConsistency(BlobStoreRepository repository, Executor ex @Override protected void doRun() throws Exception { final BlobContainer blobContainer = repository.blobContainer(); - assertTrue( - "Could not find index.latest blob for repo [" + repository + "]", blobContainer.blobExists("index.latest")); final long latestGen; try (DataInputStream inputStream = new DataInputStream(blobContainer.readBlob("index.latest"))) { latestGen = inputStream.readLong(); + } catch (NoSuchFileException e) { + throw new AssertionError("Could not find index.latest blob for repo [" + repository + "]"); } assertIndexGenerations(blobContainer, latestGen); final RepositoryData repositoryData; @@ -159,4 +177,73 @@ private static void assertSnapshotUUIDs(BlobStoreRepository repository, Reposito } } } + + public static long createDanglingIndex(BlobStoreRepository repository, String name, Set files) + throws InterruptedException, ExecutionException { + final PlainActionFuture future = PlainActionFuture.newFuture(); + final AtomicLong totalSize = new AtomicLong(); + repository.threadPool().generic().execute(new ActionRunnable<>(future) { + @Override + protected void doRun() throws Exception { + final BlobStore blobStore = repository.blobStore(); + BlobContainer container = + blobStore.blobContainer(repository.basePath().add("indices").add(name)); + for (String file : files) { + int size = randomIntBetween(0, 10); + totalSize.addAndGet(size); + container.writeBlob(file, new ByteArrayInputStream(new byte[size]), size, false); + } + future.onResponse(null); + } + }); + future.get(); + return totalSize.get(); + } + + public static void assertCorruptionVisible(BlobStoreRepository repository, Map> indexToFiles) { + final PlainActionFuture future = PlainActionFuture.newFuture(); + repository.threadPool().generic().execute(new ActionRunnable<>(future) { + @Override + protected void doRun() throws Exception { + final BlobStore blobStore = repository.blobStore(); + for (String index : indexToFiles.keySet()) { + if (blobStore.blobContainer(repository.basePath().add("indices")) + .children().containsKey(index) == false) { + future.onResponse(false); + return; + } + for (String file : indexToFiles.get(index)) { + try (InputStream ignored = + blobStore.blobContainer(repository.basePath().add("indices").add(index)).readBlob(file)) { + } catch (NoSuchFileException e) { + future.onResponse(false); + return; + } + } + } + future.onResponse(true); + } + }); + assertTrue(future.actionGet()); + } + + public static void assertBlobsByPrefix(BlobStoreRepository repository, BlobPath path, String prefix, Map blobs) { + final PlainActionFuture> future = PlainActionFuture.newFuture(); + repository.threadPool().generic().execute(new ActionRunnable<>(future) { + @Override + protected void doRun() throws Exception { + final BlobStore blobStore = repository.blobStore(); + future.onResponse(blobStore.blobContainer(path).listBlobsByPrefix(prefix)); + } + }); + Map foundBlobs = future.actionGet(); + if (blobs.isEmpty()) { + assertThat(foundBlobs.keySet(), empty()); + } else { + assertThat(foundBlobs.keySet(), containsInAnyOrder(blobs.keySet().toArray(Strings.EMPTY_ARRAY))); + for (Map.Entry entry : foundBlobs.entrySet()) { + assertEquals(entry.getValue().length(), blobs.get(entry.getKey()).length()); + } + } + } } diff --git 
a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java index 947a2c9887c10..f468b235505a4 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java @@ -269,7 +269,7 @@ public void testIndicesDeletedFromRepository() throws Exception { latch.await(); for (IndexId indexId : repositoryData.get().getIndices().values()) { if (indexId.getName().equals("test-idx-3")) { - assertFalse(indicesBlobContainer.get().blobExists(indexId.getId())); // deleted index + assertFalse(BlobStoreTestUtil.blobExists(indicesBlobContainer.get(), indexId.getId())); // deleted index } } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractStreamableTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractStreamableTestCase.java deleted file mode 100644 index 15ce07e977514..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractStreamableTestCase.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.test; - -import org.elasticsearch.Version; -import org.elasticsearch.common.io.stream.Streamable; -import org.elasticsearch.common.io.stream.Writeable; - -import java.io.IOException; - -public abstract class AbstractStreamableTestCase extends AbstractWireTestCase { - - @Override - protected final T copyInstance(T instance, Version version) throws IOException { - return copyStreamable(instance, getNamedWriteableRegistry(), this::createBlankInstance, version); - } - - @Override - protected final Writeable.Reader instanceReader() { - return Streamable.newWriteableReader(this::createBlankInstance); - } - - /** - * Creates an empty instance to use when deserialising the - * {@link Streamable}. This usually returns an instance created using the - * zer-arg constructor - */ - protected abstract T createBlankInstance(); -} diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractStreamableXContentTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractStreamableXContentTestCase.java deleted file mode 100644 index 402981cd705f6..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractStreamableXContentTestCase.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
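The file deleted here, AbstractStreamableTestCase, existed to round-trip classes implementing the legacy Streamable interface, which deserializes by mutating a blank instance obtained from a zero-arg constructor. Its removal continues the migration to Writeable, where deserialization happens in a constructor and fields can stay final. A hedged sketch of the pattern tests are expected to target instead (the Example class and its field are hypothetical, not part of this change; imports from org.elasticsearch.common.io.stream are assumed):

    public class Example implements Writeable {
        private final String name;

        public Example(String name) {
            this.name = name;
        }

        // The deserializing constructor replaces Streamable#readFrom.
        public Example(StreamInput in) throws IOException {
            this.name = in.readString();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(name);
        }
    }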
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.test; - -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.Streamable; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; - -import java.io.IOException; -import java.util.function.Predicate; - -import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; - -public abstract class AbstractStreamableXContentTestCase extends AbstractStreamableTestCase { - - /** - * Generic test that creates new instance from the test instance and checks - * both for equality and asserts equality on the two queries. - */ - public final void testFromXContent() throws IOException { - xContentTester(this::createParser, this::createXContextTestInstance, getToXContentParams(), this::doParseInstance) - .numberOfTestRuns(NUMBER_OF_TEST_RUNS) - .supportsUnknownFields(supportsUnknownFields()) - .shuffleFieldsExceptions(getShuffleFieldsExceptions()) - .randomFieldsExcludeFilter(getRandomFieldsExcludeFilter()) - .assertEqualsConsumer(this::assertEqualInstances) - .assertToXContentEquivalence(true) - .test(); - } - - /** - * Creates a random instance to use in the xcontent tests. - * Override this method if the random instance that you build - * should be aware of the {@link XContentType} used in the test. - */ - protected T createXContextTestInstance(XContentType xContentType) { - return createTestInstance(); - } - - /** - * Parses to a new instance using the provided {@link XContentParser} - */ - protected abstract T doParseInstance(XContentParser parser) throws IOException; - - /** - * Indicates whether the parser supports unknown fields or not. In case it does, such behaviour will be tested by - * inserting random fields before parsing and checking that they don't make parsing fail. 
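AbstractStreamableXContentTestCase, whose deletion follows, layered an xContent round-trip (testFromXContent via xContentTester) on top of the Streamable wire round-trip. A test that needs both checks can extend AbstractSerializingTestCase, the Writeable-based equivalent that remains in the framework. A hedged sketch reusing the hypothetical Example from the previous snippet, assuming it also implements ToXContentObject and exposes a fromXContent parser:

    public class ExampleTests extends AbstractSerializingTestCase<Example> {

        @Override
        protected Example createTestInstance() {
            return new Example(randomAlphaOfLength(10));
        }

        @Override
        protected Writeable.Reader<Example> instanceReader() {
            return Example::new;  // drives the wire round-trip
        }

        @Override
        protected Example doParseInstance(XContentParser parser) throws IOException {
            return Example.fromXContent(parser);  // drives the xContent round-trip
        }
    }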
- */ - protected boolean supportsUnknownFields() { - return true; - } - - /** - * Returns a predicate that given the field name indicates whether the field has to be excluded from random fields insertion or not - */ - protected Predicate getRandomFieldsExcludeFilter() { - return field -> false; - } - - /** - * Fields that have to be ignored when shuffling as part of testFromXContent - */ - protected String[] getShuffleFieldsExceptions() { - return Strings.EMPTY_ARRAY; - } - - /** - * Params that have to be provided when calling calling {@link ToXContent#toXContent(XContentBuilder, ToXContent.Params)} - */ - protected ToXContent.Params getToXContentParams() { - return ToXContent.EMPTY_PARAMS; - } -} diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 7a39f8be69b62..48d7d6a1a75e4 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -1360,7 +1360,9 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean ma } final List actualErrors = new ArrayList<>(); for (Tuple tuple : errors) { - if (ExceptionsHelper.unwrapCause(tuple.v2()) instanceof EsRejectedExecutionException) { + Throwable t = ExceptionsHelper.unwrapCause(tuple.v2()); + if (t instanceof EsRejectedExecutionException) { + logger.debug("Error indexing doc: " + t.getMessage() + ", reindexing."); tuple.v1().execute().actionGet(); // re-index if rejected } else { actualErrors.add(tuple.v2()); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index ba0421b409ae5..98fd38bfeea09 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -64,7 +64,6 @@ import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.LogConfigurator; @@ -186,14 +185,14 @@ public static void resetPortCounter() { } // Allows distinguishing between parallel test processes - public static final int TEST_WORKER_VM; + public static final String TEST_WORKER_VM_ID; - protected static final String TEST_WORKER_SYS_PROPERTY = "org.gradle.test.worker"; + public static final String TEST_WORKER_SYS_PROPERTY = "org.gradle.test.worker"; + + public static final String DEFAULT_TEST_WORKER_ID = "--not-gradle--"; static { - // org.gradle.test.worker starts counting at 1, but we want to start counting at 0 here - // in case system property is not defined (e.g. 
when running test from IDE), just use 0 - TEST_WORKER_VM = RandomizedTest.systemPropertyAsInt(TEST_WORKER_SYS_PROPERTY, 1) - 1; + TEST_WORKER_VM_ID = System.getProperty(TEST_WORKER_SYS_PROPERTY, DEFAULT_TEST_WORKER_ID); setTestSysProps(); LogConfigurator.loadLog4jPlugins(); @@ -1140,18 +1139,6 @@ public static T copyWriteable(T original, NamedWriteableRe return copyInstance(original, namedWriteableRegistry, (out, value) -> value.writeTo(out), reader, version); } - /** - * Create a copy of an original {@link Streamable} object by running it through a {@link BytesStreamOutput} and - * reading it in again using a provided {@link Writeable.Reader}. The stream that is wrapped around the {@link StreamInput} - * potentially need to use a {@link NamedWriteableRegistry}, so this needs to be provided too (although it can be - * empty if the object that is streamed doesn't contain any {@link NamedWriteable} objects itself. - */ - public static T copyStreamable(T original, NamedWriteableRegistry namedWriteableRegistry, - Supplier supplier, Version version) throws IOException { - return copyInstance(original, namedWriteableRegistry, (out, value) -> value.writeTo(out), - Streamable.newWriteableReader(supplier), version); - } - protected static T copyInstance(T original, NamedWriteableRegistry namedWriteableRegistry, Writeable.Writer writer, Writeable.Reader reader, Version version) throws IOException { try (BytesStreamOutput output = new BytesStreamOutput()) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index ffb5bb36b2245..4476753911913 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -505,7 +505,7 @@ private static Settings getRandomNodeSettings(long seed) { public static String clusterName(String prefix, long clusterSeed) { StringBuilder builder = new StringBuilder(prefix); - builder.append("-TEST_WORKER_VM=[").append(ESTestCase.TEST_WORKER_VM).append(']'); + builder.append("-TEST_WORKER_VM=[").append(ESTestCase.TEST_WORKER_VM_ID).append(']'); builder.append("-CLUSTER_SEED=[").append(clusterSeed).append(']'); // if multiple maven task run on a single host we better have an identifier that doesn't rely on input params builder.append("-HASH=[").append(SeedUtils.formatSeed(System.nanoTime())).append(']'); @@ -1485,6 +1485,15 @@ public synchronized void stopRandomNode(final Predicate filter) throws ensureOpen(); NodeAndClient nodeAndClient = getRandomNodeAndClient(nc -> filter.test(nc.node.settings())); if (nodeAndClient != null) { + if (nodePrefix.equals(ESIntegTestCase.SUITE_CLUSTER_NODE_PREFIX) && nodeAndClient.nodeAndClientId() < sharedNodesSeeds.length + && nodeAndClient.isMasterEligible() + && autoManageMasterNodes + && nodes.values().stream() + .filter(NodeAndClient::isMasterEligible) + .filter(n -> n.nodeAndClientId() < sharedNodesSeeds.length) + .count() == 1) { + throw new AssertionError("Tried to stop the only master eligible shared node"); + } logger.info("Closing filtered random node [{}] ", nodeAndClient.name); stopNodesAndClient(nodeAndClient); } @@ -2216,16 +2225,15 @@ public void ensureEstimatedStats() { CircuitBreaker fdBreaker = breakerService.getBreaker(CircuitBreaker.FIELDDATA); assertThat("Fielddata breaker not reset to 0 on node: " + name, fdBreaker.getUsed(), equalTo(0L)); - // Mute this assertion until we have a new Lucene snapshot 
with https://issues.apache.org/jira/browse/LUCENE-8809. - // try { - // assertBusy(() -> { - // CircuitBreaker acctBreaker = breakerService.getBreaker(CircuitBreaker.ACCOUNTING); - // assertThat("Accounting breaker not reset to 0 on node: " + name + ", are there still Lucene indices around?", - // acctBreaker.getUsed(), equalTo(0L)); - // }); - // } catch (Exception e) { - // throw new AssertionError("Exception during check for accounting breaker reset to 0", e); - // } + try { + assertBusy(() -> { + CircuitBreaker acctBreaker = breakerService.getBreaker(CircuitBreaker.ACCOUNTING); + assertThat("Accounting breaker not reset to 0 on node: " + name + ", are there still Lucene indices around?", + acctBreaker.getUsed(), equalTo(0L)); + }); + } catch (Exception e) { + throw new AssertionError("Exception during check for accounting breaker reset to 0", e); + } // Anything that uses transport or HTTP can increase the // request breaker (because they use bigarrays), because of diff --git a/test/framework/src/main/java/org/elasticsearch/test/RandomObjects.java b/test/framework/src/main/java/org/elasticsearch/test/RandomObjects.java index 2af37bc238bdb..69bd367568ad5 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/RandomObjects.java +++ b/test/framework/src/main/java/org/elasticsearch/test/RandomObjects.java @@ -19,9 +19,14 @@ package org.elasticsearch.test; +import com.carrotsearch.randomizedtesting.RandomizedTest; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomStrings; + import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction; +import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction.AnalyzeToken; import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo; import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo.Failure; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -43,7 +48,9 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Base64; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.Random; import static com.carrotsearch.randomizedtesting.generators.RandomNumbers.randomIntBetween; @@ -341,4 +348,43 @@ private static Tuple randomShardInfoFailure(Random random) { return Tuple.tuple(actual, expected); } + + public static AnalyzeToken randomToken(Random random) { + String token = RandomStrings.randomAsciiLettersOfLengthBetween(random, 1, 20); + int position = RandomizedTest.randomIntBetween(0, 1000); + int startOffset = RandomizedTest.randomIntBetween(0, 1000); + int endOffset = RandomizedTest.randomIntBetween(0, 1000); + int posLength = RandomizedTest.randomIntBetween(1, 5); + String type = RandomStrings.randomAsciiLettersOfLengthBetween(random, 1, 20); + Map extras = new HashMap<>(); + if (random.nextBoolean()) { + int entryCount = RandomNumbers.randomIntBetween(random, 0, 6); + for (int i = 0; i < entryCount; i++) { + switch (RandomNumbers.randomIntBetween(random, 0, 6)) { + case 0: + case 1: + case 2: + case 3: + String key = RandomStrings.randomAsciiLettersOfLength(random, 5); + String value = RandomStrings.randomAsciiLettersOfLength(random, 10); + extras.put(key, value); + break; + case 4: + String objkey = RandomStrings.randomAsciiLettersOfLength(random, 5); + Map obj = new HashMap<>(); + 
obj.put(RandomStrings.randomAsciiLettersOfLength(random, 5), RandomStrings.randomAsciiLettersOfLength(random, 10));
+                        extras.put(objkey, obj);
+                        break;
+                    case 5:
+                        String listkey = RandomStrings.randomAsciiLettersOfLength(random, 5);
+                        List<String> list = new ArrayList<>();
+                        list.add(RandomStrings.randomAsciiLettersOfLength(random, 4));
+                        list.add(RandomStrings.randomAsciiLettersOfLength(random, 6));
+                        extras.put(listkey, list);
+                        break;
+                }
+            }
+        }
+        return new AnalyzeAction.AnalyzeToken(token, position, startOffset, endOffset, posLength, type, extras);
+    }
 }
diff --git a/test/framework/src/main/java/org/elasticsearch/test/junit/annotations/TestIssueLogging.java b/test/framework/src/main/java/org/elasticsearch/test/junit/annotations/TestIssueLogging.java
new file mode 100644
index 0000000000000..8e02b4dce1467
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/junit/annotations/TestIssueLogging.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.junit.annotations;
+
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+import static java.lang.annotation.ElementType.METHOD;
+import static java.lang.annotation.ElementType.PACKAGE;
+import static java.lang.annotation.ElementType.TYPE;
+
+/**
+ * Annotation used to set a custom log level when investigating test failures. Do not use this annotation to explicitly
+ * control the logging level in tests; instead, use {@link TestLogging}.
+ *
+ * It supports multiple logger:level comma-separated key-value pairs of logger:level (e.g.,
+ * org.elasticsearch.cluster.metadata:TRACE). Use the _root keyword to set the root logger level.
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target({PACKAGE, TYPE, METHOD})
+public @interface TestIssueLogging {
+
+    /**
+     * A comma-separated list of key-value pairs of logger:level. For each key-value pair of logger:level, the test
+     * framework will set the logging level of the specified logger to the specified level.
+     *
+     * @return the logger:level pairs
+     */
+    String value();
+
+    /**
+     * This property is used to link to the open test issue under investigation.
+     *
+     * @return the issue link
+     */
+    String issueUrl();
+
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/junit/annotations/TestLogging.java b/test/framework/src/main/java/org/elasticsearch/test/junit/annotations/TestLogging.java
index d0e799d63fefb..2354979d0606f 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/junit/annotations/TestLogging.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/junit/annotations/TestLogging.java
@@ -16,6 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
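To make the split between the two annotations concrete, here is a hedged usage sketch; the logger names, levels, and issue URL are placeholders, and (per the LoggingListener change further below) the two annotations must not target the same logger. TestLogging, whose diff follows immediately, now also requires a reason:

    // @TestLogging demands a reason; @TestIssueLogging ties temporary verbosity to an open issue.
    @TestLogging(value = "org.elasticsearch.transport:DEBUG",
        reason = "this test asserts on messages that are only logged at DEBUG")
    @TestIssueLogging(value = "org.elasticsearch.cluster.coordination:TRACE",
        issueUrl = "https://github.com/elastic/elasticsearch/issues/00000")
    public void testUnderInvestigation() {
        // ...
    }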
*/ + package org.elasticsearch.test.junit.annotations; import java.lang.annotation.Retention; @@ -27,14 +28,29 @@ import static java.lang.annotation.ElementType.TYPE; /** - * Annotation used to set a custom log level for a specific test method. + * Annotation used to set a custom log level for controlling logging behavior in tests. Do not use this annotation when + * investigating test failures; instead, use {@link TestIssueLogging}. * - * It supports multiple logger:level comma separated key value pairs - * Use the _root keyword to set the root logger level - * e.g. @TestLogging("_root:DEBUG,org.elasticsearch.cluster.metadata:TRACE") + * It supports multiple logger:level comma-separated key-value pairs of logger:level (e.g., + * org.elasticsearch.cluster.metadata:TRACE). Use the _root keyword to set the root logger level. */ @Retention(RetentionPolicy.RUNTIME) @Target({PACKAGE, TYPE, METHOD}) public @interface TestLogging { + + /** + * A comma-separated list of key-value pairs of logger:level. For each key-value pair of logger:level, the test + * framework will set the logging level of the specified logger to the specified level. + * + * @return the logger:level pairs + */ String value(); + + /** + * The reason this annotation is used to control logger behavior during a test. + * + * @return the reason for adding the annotation + */ + String reason(); + } diff --git a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java index 56f77bcefc81a..1126adf8553ac 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java +++ b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java @@ -22,14 +22,19 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.test.junit.annotations.TestIssueLogging; import org.elasticsearch.test.junit.annotations.TestLogging; import org.junit.runner.Description; import org.junit.runner.Result; import org.junit.runner.notification.RunListener; import java.util.Collections; +import java.util.HashSet; import java.util.Map; +import java.util.Set; import java.util.TreeMap; +import java.util.stream.Collectors; +import java.util.stream.Stream; /** * A {@link RunListener} that allows changing the log level for a specific test method. When a test method is annotated with the @@ -49,8 +54,12 @@ public class LoggingListener extends RunListener { @Override public void testRunStarted(final Description description) throws Exception { Package testClassPackage = description.getTestClass().getPackage(); - previousPackageLoggingMap = processTestLogging(testClassPackage != null ? testClassPackage.getAnnotation(TestLogging.class) : null); - previousClassLoggingMap = processTestLogging(description.getAnnotation(TestLogging.class)); + previousPackageLoggingMap = processTestLogging( + testClassPackage != null ? testClassPackage.getAnnotation(TestLogging.class) : null, + testClassPackage != null ? 
testClassPackage.getAnnotation(TestIssueLogging.class) : null);
+        previousClassLoggingMap = processTestLogging(
+            description.getAnnotation(TestLogging.class),
+            description.getAnnotation(TestIssueLogging.class));
     }
 
     @Override
@@ -62,7 +71,8 @@ public void testRunFinished(final Result result) throws Exception {
     @Override
     public void testStarted(final Description description) throws Exception {
         final TestLogging testLogging = description.getAnnotation(TestLogging.class);
-        previousLoggingMap = processTestLogging(testLogging);
+        final TestIssueLogging testIssueLogging = description.getAnnotation(TestIssueLogging.class);
+        previousLoggingMap = processTestLogging(testLogging, testIssueLogging);
     }
 
     @Override
@@ -89,22 +99,35 @@ private static Logger resolveLogger(String loggerName) {
      * @param testLogging the test logging annotation to apply
      * @return the existing logging levels
      */
-    private Map<String, String> processTestLogging(final TestLogging testLogging) {
-        final Map<String, String> map = getLoggersAndLevelsFromAnnotation(testLogging);
-
-        if (map == null) {
-            return Collections.emptyMap();
+    private Map<String, String> processTestLogging(final TestLogging testLogging, final TestIssueLogging testIssueLogging) {
+        final Map<String, String> testLoggingMap = getLoggersAndLevelsFromAnnotation(testLogging);
+        final Map<String, String> testIssueLoggingMap = getLoggersAndLevelsFromAnnotation(testIssueLogging);
+
+        final Set<String> testLoggingKeys = new HashSet<>(testLoggingMap.keySet());
+        testLoggingKeys.retainAll(testIssueLoggingMap.keySet());
+        if (testLoggingKeys.isEmpty() == false) {
+            throw new IllegalArgumentException("found intersection " + testLoggingKeys + " between TestLogging and TestIssueLogging");
         }
-        // obtain the existing logging levels so that we can restore them at the end of the test; we have to do this separately from setting
-        // the logging levels so that setting foo does not impact the logging level for foo.bar when we check the existing logging level for
-        // for.bar
+        /*
+         * Use a sorted set so that we apply a parent logger before its children thus not overwriting the child setting when processing the
+         * parent setting.
+         */
+        final Map<String, String> loggingLevels =
+            new TreeMap<>(Stream.concat(testLoggingMap.entrySet().stream(), testIssueLoggingMap.entrySet().stream())
+                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)));
+
+        /*
+         * Obtain the existing logging levels so that we can restore them at the end of the test. We have to do this separately from
+         * setting the logging levels so that setting foo does not impact the logging level for foo.bar when we check the existing logging
+         * level for foo.bar.
+         */
         final Map<String, String> existing = new TreeMap<>();
-        for (final Map.Entry<String, String> entry : map.entrySet()) {
+        for (final Map.Entry<String, String> entry : loggingLevels.entrySet()) {
             final Logger logger = resolveLogger(entry.getKey());
             existing.put(entry.getKey(), logger.getLevel().toString());
         }
-        for (final Map.Entry<String, String> entry : map.entrySet()) {
+        for (final Map.Entry<String, String> entry : loggingLevels.entrySet()) {
             final Logger logger = resolveLogger(entry.getKey());
             Loggers.setLevel(logger, entry.getValue());
         }
@@ -121,10 +144,25 @@ private static Map<String, String> getLoggersAndLevelsFromAnnotation(final TestL
         if (testLogging == null) {
             return Collections.emptyMap();
         }
-        // use a sorted set so that we apply a parent logger before its children thus not overwriting the child setting when processing the
-        // parent setting
+
+        return getLoggersAndLevelsFromAnnotationValue(testLogging.value());
+    }
+
+    private static Map<String, String> getLoggersAndLevelsFromAnnotation(final TestIssueLogging testIssueLogging) {
+        if (testIssueLogging == null) {
+            return Collections.emptyMap();
+        }
+
+        return getLoggersAndLevelsFromAnnotationValue(testIssueLogging.value());
+    }
+
+    private static Map<String, String> getLoggersAndLevelsFromAnnotationValue(final String value) {
+        /*
+         * Use a sorted set so that we apply a parent logger before its children thus not overwriting the child setting when processing the
+         * parent setting.
+         */
         final Map<String, String> map = new TreeMap<>();
-        final String[] loggersAndLevels = testLogging.value().split(",");
+        final String[] loggersAndLevels = value.split(",");
         for (final String loggerAndLevel : loggersAndLevels) {
             final String[] loggerAndLevelArray = loggerAndLevel.split(":");
             if (loggerAndLevelArray.length == 2) {
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
index 969cebc0c60b5..e7fe1d15b08a3 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
@@ -140,11 +140,7 @@ public void initClient() throws IOException {
         assert clusterHosts == null;
         assert hasXPack == null;
         assert nodeVersions == null;
-        String cluster = System.getProperty("tests.rest.cluster");
-        if (cluster == null) {
-            throw new RuntimeException("Must specify [tests.rest.cluster] system property with a comma delimited list of [host:port] "
-                + "to which to send REST requests");
-        }
+        String cluster = getTestRestCluster();
         String[] stringUrls = cluster.split(",");
         List<HttpHost> hosts = new ArrayList<>(stringUrls.length);
         for (String stringUrl : stringUrls) {
@@ -182,6 +178,15 @@ public void initClient() throws IOException {
         assert hasXPack != null;
         assert nodeVersions != null;
     }
+
+    protected String getTestRestCluster() {
+        String cluster = System.getProperty("tests.rest.cluster");
+        if (cluster == null) {
+            throw new RuntimeException("Must specify [tests.rest.cluster] system property with a comma delimited list of [host:port] "
+                + "to which to send REST requests");
+        }
+        return cluster;
+    }
 
     /**
      * Helper class to check warnings in REST responses with sensitivity to versions
@@ -530,7 +535,7 @@ private void wipeCluster() throws Exception {
      * the snapshots intact in the repository.
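Extracting the lookup into the new protected getTestRestCluster hook keeps the failure mode identical for ordinary -Dtests.rest.cluster runs while letting a subclass supply its endpoints programmatically. A hedged sketch (class name and addresses are illustrative):

    public class FixtureBackedRestIT extends ESRestTestCase {

        // Bypass the system property and point the REST client at a known fixture.
        @Override
        protected String getTestRestCluster() {
            return "localhost:9200,localhost:9201";
        }
    }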
* @return Map of repository name to list of snapshots found in unfinished state */ - private Map>> wipeSnapshots() throws IOException { + protected Map>> wipeSnapshots() throws IOException { final Map>> inProgressSnapshots = new HashMap<>(); for (Map.Entry repo : entityAsMap(adminClient.performRequest(new Request("GET", "/_snapshot/_all"))).entrySet()) { String repoName = repo.getKey(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java index 93832833b7ff4..87e1f25336229 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java @@ -81,7 +81,7 @@ public TransportService createTransportService(Settings settings, ThreadPool thr @Nullable ClusterSettings clusterSettings, Set taskHeaders) { StubbableConnectionManager connectionManager = new StubbableConnectionManager(new ConnectionManager(settings, this), settings, this); - connectionManager.setDefaultNodeConnectedBehavior((cm, discoveryNode) -> nodeConnected(discoveryNode)); + connectionManager.setDefaultNodeConnectedBehavior(cm -> Collections.emptySet()); connectionManager.setDefaultGetConnectionBehavior((cm, discoveryNode) -> createConnection(discoveryNode)); return new TransportService(settings, this, threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders, connectionManager); @@ -186,10 +186,6 @@ public void sendRequest(long requestId, String action, TransportRequest request, protected void onSendRequest(long requestId, String action, TransportRequest request, DiscoveryNode node) { } - protected boolean nodeConnected(DiscoveryNode discoveryNode) { - return true; - } - @Override public TransportStats getStats() { throw new UnsupportedOperationException(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index f917dc3cfb272..9a42fe4cd8ee5 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -105,14 +105,33 @@ public static MockTransportService createNewService(Settings settings, Version v return createNewService(settings, mockTransport, version, threadPool, clusterSettings, Collections.emptySet()); } - public static MockNioTransport newMockTransport(Settings settings, Version version, ThreadPool threadPool) { + /** + * Returns a unique port range for this JVM starting from the computed base port + */ + public static String getPortRange() { + return getBasePort() + "-" + (getBasePort() + 99); // upper bound is inclusive + } + + protected static int getBasePort() { // some tests use MockTransportService to do network based testing. Yet, we run tests in multiple JVMs that means // concurrent tests could claim port that another JVM just released and if that test tries to simulate a disconnect it might // be smart enough to re-connect depending on what is tested. 
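/*
 * A compact, runnable sketch of the per-JVM port selection that the surrounding comment describes and
 * that getBasePort() implements just below, using only plain JDK calls. The property name and the
 * constants (base 10300, modulus 223, 100-port ranges) are taken from this diff; the class name and
 * main() harness are illustrative only.
 */
public final class PortRangeSketch {

    static int basePort(final String workerId) {
        // worker IDs grow for the lifetime of the Gradle daemon, so reduce them modulo 223 to keep
        // every 100-port range inside [10300, 32599], below the Linux ephemeral range at 32768
        final int startAt = workerId == null ? 0 : (int) Math.floorMod(Long.parseLong(workerId), 223L);
        return 10300 + startAt * 100;
    }

    static String portRange(final String workerId) {
        final int base = basePort(workerId);
        return base + "-" + (base + 99); // upper bound is inclusive
    }

    public static void main(final String[] args) {
        System.out.println(portRange(null));  // 10300-10399, e.g. when running from an IDE
        System.out.println(portRange("7"));   // 11000-11099
        System.out.println(portRange("230")); // wraps: floorMod(230, 223) == 7, so 11000-11099 again
        System.out.println(basePort("222") + 99 < 32768); // true: the highest range stays below ephemeral ports
    }
}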
To reduce the risk, since this is very hard to debug we use // a different default port range per JVM unless the incoming settings override it // use a non-default base port otherwise some cluster in this JVM might reuse a port - int basePort = 10300 + (ESTestCase.TEST_WORKER_VM * 100); - settings = Settings.builder().put(TransportSettings.PORT.getKey(), basePort + "-" + (basePort + 100)).put(settings).build(); + + // We rely on Gradle implementation details here, the worker IDs are long values incremented by one for the + // lifespan of the daemon this means that they can get larger than the allowed port range. + // Ephemeral ports on Linux start at 32768 so we modulo to make sure that we don't exceed that. + // This is safe as long as we have fewer than 224 Gradle workers running in parallel + // See also: https://github.com/elastic/elasticsearch/issues/44134 + final String workerId = System.getProperty(ESTestCase.TEST_WORKER_SYS_PROPERTY); + final int startAt = workerId == null ? 0 : Math.floorMod(Long.valueOf(workerId), 223); + assert startAt >= 0 : "Unexpected test worker Id, resulting port range would be negative"; + return 10300 + (startAt * 100); + } + + public static MockNioTransport newMockTransport(Settings settings, Version version, ThreadPool threadPool) { + settings = Settings.builder().put(TransportSettings.PORT.getKey(), getPortRange()).put(settings).build(); NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(ClusterModule.getNamedWriteables()); return new MockNioTransport(settings, version, threadPool, new NetworkService(Collections.emptyList()), new MockPageCacheRecycler(settings), namedWriteableRegistry, new NoneCircuitBreakerService()); @@ -481,15 +500,6 @@ public boolean addGetConnectionBehavior(StubbableConnectionManager.GetConnection return connectionManager().setDefaultGetConnectionBehavior(behavior); } - /** - * Adds a node connected behavior that is used for the given delegate address. - * - * @return {@code true} if no other node connected behavior was registered for this address before. - */ - public boolean addNodeConnectedBehavior(TransportAddress transportAddress, StubbableConnectionManager.NodeConnectedBehavior behavior) { - return connectionManager().addNodeConnectedBehavior(transportAddress, behavior); - } - /** * Adds a node connected behavior that is the default node connected behavior. 
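/*
 * The default node-connected behavior referenced above now supplies the whole connected-node set
 * rather than answering per-node queries, and the per-address addNodeConnectedBehavior hook is
 * removed. A small sketch of that model with plain JDK stand-ins: DiscoveryNode is replaced by
 * String, and the class and field names are illustrative, not the PR's exact types.
 */
import java.util.Collections;
import java.util.Set;

public final class NodeConnectedSketch {

    @FunctionalInterface
    interface NodeConnectedBehavior {
        Set<String> connectedNodes(Set<String> delegateNodes);
    }

    private final Set<String> delegateNodes;
    private volatile NodeConnectedBehavior behavior = nodes -> nodes; // default: the delegate's view

    NodeConnectedSketch(final Set<String> delegateNodes) {
        this.delegateNodes = delegateNodes;
    }

    void setDefaultNodeConnectedBehavior(final NodeConnectedBehavior behavior) {
        this.behavior = behavior;
    }

    boolean nodeConnected(final String node) {
        // derived from the set, so there is no separate per-address stubbing to keep consistent
        return behavior.connectedNodes(delegateNodes).contains(node);
    }

    public static void main(final String[] args) {
        final NodeConnectedSketch manager = new NodeConnectedSketch(Set.of("node-a", "node-b"));
        System.out.println(manager.nodeConnected("node-a")); // true
        manager.setDefaultNodeConnectedBehavior(nodes -> Collections.emptySet()); // simulate total disconnect
        System.out.println(manager.nodeConnected("node-a")); // false
    }
}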
* diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableConnectionManager.java b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableConnectionManager.java index a14eaa691f43e..8f07bc19d0b11 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableConnectionManager.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableConnectionManager.java @@ -28,6 +28,7 @@ import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportConnectionListener; +import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; @@ -35,15 +36,13 @@ public class StubbableConnectionManager extends ConnectionManager { private final ConnectionManager delegate; private final ConcurrentMap getConnectionBehaviors; - private final ConcurrentMap nodeConnectedBehaviors; private volatile GetConnectionBehavior defaultGetConnectionBehavior = ConnectionManager::getConnection; - private volatile NodeConnectedBehavior defaultNodeConnectedBehavior = ConnectionManager::nodeConnected; + private volatile NodeConnectedBehavior defaultNodeConnectedBehavior = ConnectionManager::connectedNodes; public StubbableConnectionManager(ConnectionManager delegate, Settings settings, Transport transport) { super(settings, transport); this.delegate = delegate; this.getConnectionBehaviors = new ConcurrentHashMap<>(); - this.nodeConnectedBehaviors = new ConcurrentHashMap<>(); } public boolean addConnectBehavior(TransportAddress transportAddress, GetConnectionBehavior connectBehavior) { @@ -56,10 +55,6 @@ public boolean setDefaultGetConnectionBehavior(GetConnectionBehavior behavior) { return prior == null; } - public boolean addNodeConnectedBehavior(TransportAddress transportAddress, NodeConnectedBehavior behavior) { - return nodeConnectedBehaviors.put(transportAddress, behavior) == null; - } - public boolean setDefaultNodeConnectedBehavior(NodeConnectedBehavior behavior) { NodeConnectedBehavior prior = defaultNodeConnectedBehavior; defaultNodeConnectedBehavior = behavior; @@ -69,13 +64,11 @@ public boolean setDefaultNodeConnectedBehavior(NodeConnectedBehavior behavior) { public void clearBehaviors() { defaultGetConnectionBehavior = ConnectionManager::getConnection; getConnectionBehaviors.clear(); - defaultNodeConnectedBehavior = ConnectionManager::nodeConnected; - nodeConnectedBehaviors.clear(); + defaultNodeConnectedBehavior = ConnectionManager::connectedNodes; } public void clearBehavior(TransportAddress transportAddress) { getConnectionBehaviors.remove(transportAddress); - nodeConnectedBehaviors.remove(transportAddress); } @Override @@ -92,9 +85,12 @@ public Transport.Connection getConnection(DiscoveryNode node) { @Override public boolean nodeConnected(DiscoveryNode node) { - TransportAddress address = node.getAddress(); - NodeConnectedBehavior behavior = nodeConnectedBehaviors.getOrDefault(address, defaultNodeConnectedBehavior); - return behavior.nodeConnected(delegate, node); + return defaultNodeConnectedBehavior.connectedNodes(delegate).contains(node); + } + + @Override + public Set connectedNodes() { + return defaultNodeConnectedBehavior.connectedNodes(delegate); } @Override @@ -136,6 +132,6 @@ public interface GetConnectionBehavior { @FunctionalInterface public interface NodeConnectedBehavior { - boolean nodeConnected(ConnectionManager connectionManager, DiscoveryNode discoveryNode); + Set connectedNodes(ConnectionManager connectionManager); } } diff --git 
a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index e9d94f7b0dfc6..492de65de8a3c 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -92,6 +92,7 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; +import static org.elasticsearch.test.transport.MockTransportService.getPortRange; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -116,7 +117,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { protected volatile DiscoveryNode nodeB; protected volatile MockTransportService serviceB; - protected abstract MockTransportService build(Settings settings, Version version, ClusterSettings clusterSettings, boolean doHandshake); + protected abstract Transport build(Settings settings, Version version, ClusterSettings clusterSettings, boolean doHandshake); protected int channelsPerNodeConnection() { // This is a customized profile for this test case. @@ -175,13 +176,17 @@ public void onNodeDisconnected(DiscoveryNode node) { private MockTransportService buildService(final String name, final Version version, @Nullable ClusterSettings clusterSettings, Settings settings, boolean acceptRequests, boolean doHandshake) { Settings updatedSettings = Settings.builder() + .put(TransportSettings.PORT.getKey(), getPortRange()) .put(settings) .put(Node.NODE_NAME_SETTING.getKey(), name) .build(); if (clusterSettings == null) { clusterSettings = new ClusterSettings(updatedSettings, getSupportedSettings()); } - MockTransportService service = build(updatedSettings, version, clusterSettings, doHandshake); + Transport transport = build(updatedSettings, version, clusterSettings, doHandshake); + MockTransportService service = MockTransportService.createNewService(updatedSettings, transport, version, threadPool, + clusterSettings, Collections.emptySet()); + service.start(); if (acceptRequests) { service.acceptIncomingRequests(); } @@ -223,7 +228,7 @@ public void assertNoPendingHandshakes(Transport transport) { public void testHelloWorld() { - serviceA.registerRequestHandler("internal:sayHello", StringMessageRequest::new, ThreadPool.Names.GENERIC, + serviceA.registerRequestHandler("internal:sayHello", ThreadPool.Names.GENERIC, StringMessageRequest::new, (request, channel, task) -> { assertThat("moshe", equalTo(request.message)); try { @@ -299,7 +304,7 @@ public void handleException(TransportException exp) { public void testThreadContext() throws ExecutionException, InterruptedException { - serviceA.registerRequestHandler("internal:ping_pong", StringMessageRequest::new, ThreadPool.Names.GENERIC, + serviceA.registerRequestHandler("internal:ping_pong", ThreadPool.Names.GENERIC, StringMessageRequest::new, (request, channel, task) -> { assertEquals("ping_user", threadPool.getThreadContext().getHeader("test.ping.user")); assertNull(threadPool.getThreadContext().getTransient("my_private_context")); @@ -358,7 +363,7 @@ public void testLocalNodeConnection() throws InterruptedException { // this should be a noop serviceA.disconnectFromNode(nodeA); final AtomicReference exception = new AtomicReference<>(); - serviceA.registerRequestHandler("internal:localNode", 
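/*
 * The refactor in this file narrows the abstract build(...) hook to return a bare Transport; the
 * shared buildService(...) then performs the wrapping and lifecycle handling uniformly. A hedged
 * outline of that flow, using only calls visible in this diff (parameter plumbing abbreviated):
 *
 *   Transport transport = build(updatedSettings, version, clusterSettings, doHandshake);
 *   MockTransportService service = MockTransportService.createNewService(updatedSettings, transport,
 *       version, threadPool, clusterSettings, Collections.emptySet());
 *   service.start();
 *   if (acceptRequests) {
 *       service.acceptIncomingRequests();
 *   }
 *
 * Subclasses such as SimpleMockNioTransportTests (later in this diff) now only construct the
 * transport, so service startup and the bind-failure test can live once in the base class.
 */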
StringMessageRequest::new, ThreadPool.Names.GENERIC, + serviceA.registerRequestHandler("internal:localNode", ThreadPool.Names.GENERIC, StringMessageRequest::new, (request, channel, task) -> { try { channel.sendResponse(new StringMessageResponse(request.message)); @@ -411,9 +416,9 @@ public void testMessageListeners() throws Exception { } }; final String ACTION = "internal:action"; - serviceA.registerRequestHandler(ACTION, TransportRequest.Empty::new, ThreadPool.Names.GENERIC, + serviceA.registerRequestHandler(ACTION, ThreadPool.Names.GENERIC, TransportRequest.Empty::new, requestHandler); - serviceB.registerRequestHandler(ACTION, TransportRequest.Empty::new, ThreadPool.Names.GENERIC, + serviceB.registerRequestHandler(ACTION, ThreadPool.Names.GENERIC, TransportRequest.Empty::new, requestHandler); class CountingListener implements TransportMessageListener { @@ -528,7 +533,7 @@ public void testVoidMessageCompressed() { serviceC.start(); serviceC.acceptIncomingRequests(); - serviceA.registerRequestHandler("internal:sayHello", TransportRequest.Empty::new, ThreadPool.Names.GENERIC, + serviceA.registerRequestHandler("internal:sayHello", ThreadPool.Names.GENERIC, TransportRequest.Empty::new, (request, channel, task) -> { try { channel.sendResponse(TransportResponse.Empty.INSTANCE); @@ -581,7 +586,7 @@ public void testHelloWorldCompressed() throws IOException { serviceC.start(); serviceC.acceptIncomingRequests(); - serviceA.registerRequestHandler("internal:sayHello", StringMessageRequest::new, ThreadPool.Names.GENERIC, + serviceA.registerRequestHandler("internal:sayHello", ThreadPool.Names.GENERIC, StringMessageRequest::new, (request, channel, task) -> { assertThat("moshe", equalTo(request.message)); try { @@ -631,7 +636,7 @@ public void handleException(TransportException exp) { } public void testErrorMessage() { - serviceA.registerRequestHandler("internal:sayHelloException", StringMessageRequest::new, ThreadPool.Names.GENERIC, + serviceA.registerRequestHandler("internal:sayHelloException", ThreadPool.Names.GENERIC, StringMessageRequest::new, (request, channel, task) -> { assertThat("moshe", equalTo(request.message)); throw new RuntimeException("bad message !!!"); @@ -689,8 +694,8 @@ public void onNodeDisconnected(DiscoveryNode node) { public void testConcurrentSendRespondAndDisconnect() throws BrokenBarrierException, InterruptedException { Set sendingErrors = ConcurrentCollections.newConcurrentSet(); Set responseErrors = ConcurrentCollections.newConcurrentSet(); - serviceA.registerRequestHandler("internal:test", TestRequest::new, - randomBoolean() ? ThreadPool.Names.SAME : ThreadPool.Names.GENERIC, (request, channel, task) -> { + serviceA.registerRequestHandler("internal:test", randomBoolean() ? 
ThreadPool.Names.SAME : ThreadPool.Names.GENERIC, + TestRequest::new, (request, channel, task) -> { try { channel.sendResponse(new TestResponse((String) null)); } catch (Exception e) { @@ -706,7 +711,7 @@ public void testConcurrentSendRespondAndDisconnect() throws BrokenBarrierExcepti logger.trace("caught exception while responding from node B", e); } }; - serviceB.registerRequestHandler("internal:test", TestRequest::new, ThreadPool.Names.SAME, ignoringRequestHandler); + serviceB.registerRequestHandler("internal:test", ThreadPool.Names.SAME, TestRequest::new, ignoringRequestHandler); int halfSenders = scaledRandomIntBetween(3, 10); final CyclicBarrier go = new CyclicBarrier(halfSenders * 2 + 1); @@ -792,7 +797,7 @@ public void onAfter() { // simulate restart of nodeB serviceB.close(); MockTransportService newService = buildService("TS_B_" + i, version1, Settings.EMPTY); - newService.registerRequestHandler("internal:test", TestRequest::new, ThreadPool.Names.SAME, ignoringRequestHandler); + newService.registerRequestHandler("internal:test", ThreadPool.Names.SAME, TestRequest::new, ignoringRequestHandler); serviceB = newService; nodeB = newService.getLocalDiscoNode(); serviceB.connectToNode(nodeA); @@ -814,7 +819,7 @@ public void testNotifyOnShutdown() throws Exception { final CountDownLatch latch2 = new CountDownLatch(1); final CountDownLatch latch3 = new CountDownLatch(1); try { - serviceA.registerRequestHandler("internal:foobar", StringMessageRequest::new, ThreadPool.Names.GENERIC, + serviceA.registerRequestHandler("internal:foobar", ThreadPool.Names.GENERIC, StringMessageRequest::new, (request, channel, task) -> { try { latch2.await(); @@ -843,7 +848,7 @@ public void testNotifyOnShutdown() throws Exception { } public void testTimeoutSendExceptionWithNeverSendingBackResponse() throws Exception { - serviceA.registerRequestHandler("internal:sayHelloTimeoutNoResponse", StringMessageRequest::new, ThreadPool.Names.GENERIC, + serviceA.registerRequestHandler("internal:sayHelloTimeoutNoResponse", ThreadPool.Names.GENERIC, StringMessageRequest::new, new TransportRequestHandler() { @Override public void messageReceived(StringMessageRequest request, TransportChannel channel, Task task) { @@ -888,7 +893,7 @@ public void testTimeoutSendExceptionWithDelayedResponse() throws Exception { CountDownLatch waitForever = new CountDownLatch(1); CountDownLatch doneWaitingForever = new CountDownLatch(1); Semaphore inFlight = new Semaphore(Integer.MAX_VALUE); - serviceA.registerRequestHandler("internal:sayHelloTimeoutDelayedResponse", StringMessageRequest::new, ThreadPool.Names.GENERIC, + serviceA.registerRequestHandler("internal:sayHelloTimeoutDelayedResponse", ThreadPool.Names.GENERIC, StringMessageRequest::new, new TransportRequestHandler() { @Override public void messageReceived(StringMessageRequest request, TransportChannel channel, Task task) throws InterruptedException { @@ -992,7 +997,9 @@ public void handleException(TransportException exp) { assertTrue(inFlight.tryAcquire(Integer.MAX_VALUE, 10, TimeUnit.SECONDS)); } - @TestLogging(value = "org.elasticsearch.transport.TransportService.tracer:trace") + @TestLogging( + value = "org.elasticsearch.transport.TransportService.tracer:trace", + reason = "to ensure we log network events on TRACE level") public void testTracerLog() throws Exception { TransportRequestHandler handler = (request, channel, task) -> channel.sendResponse(new StringMessageResponse("")); TransportRequestHandler handlerWithError = (request, channel, task) -> { @@ -1024,12 +1031,12 @@ public 
String executor() { } }; - serviceA.registerRequestHandler("internal:test", StringMessageRequest::new, ThreadPool.Names.SAME, handler); - serviceA.registerRequestHandler("internal:testNotSeen", StringMessageRequest::new, ThreadPool.Names.SAME, handler); - serviceA.registerRequestHandler("internal:testError", StringMessageRequest::new, ThreadPool.Names.SAME, handlerWithError); - serviceB.registerRequestHandler("internal:test", StringMessageRequest::new, ThreadPool.Names.SAME, handler); - serviceB.registerRequestHandler("internal:testNotSeen", StringMessageRequest::new, ThreadPool.Names.SAME, handler); - serviceB.registerRequestHandler("internal:testError", StringMessageRequest::new, ThreadPool.Names.SAME, handlerWithError); + serviceA.registerRequestHandler("internal:test", ThreadPool.Names.SAME, StringMessageRequest::new, handler); + serviceA.registerRequestHandler("internal:testNotSeen", ThreadPool.Names.SAME, StringMessageRequest::new, handler); + serviceA.registerRequestHandler("internal:testError", ThreadPool.Names.SAME, StringMessageRequest::new, handlerWithError); + serviceB.registerRequestHandler("internal:test", ThreadPool.Names.SAME, StringMessageRequest::new, handler); + serviceB.registerRequestHandler("internal:testNotSeen", ThreadPool.Names.SAME, StringMessageRequest::new, handler); + serviceB.registerRequestHandler("internal:testError", ThreadPool.Names.SAME, StringMessageRequest::new, handlerWithError); String includeSettings; String excludeSettings; @@ -1127,7 +1134,10 @@ public static class StringMessageRequest extends TransportRequest { this.timeout = timeout; } - public StringMessageRequest() { + public StringMessageRequest(StreamInput in) throws IOException { + super(in); + message = in.readString(); + timeout = in.readLong(); } public StringMessageRequest(String message) { @@ -1138,13 +1148,6 @@ public long timeout() { return timeout; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - message = in.readString(); - timeout = in.readLong(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -1165,11 +1168,6 @@ static class StringMessageResponse extends TransportResponse { this.message = in.readString(); } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(message); @@ -1181,10 +1179,10 @@ public static class Version0Request extends TransportRequest { int value1; + Version0Request() {} - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); + Version0Request(StreamInput in) throws IOException { + super(in); value1 = in.readInt(); } @@ -1199,9 +1197,10 @@ public static class Version1Request extends Version0Request { int value2; - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); + Version1Request() {} + + Version1Request(StreamInput in) throws IOException { + super(in); if (in.getVersion().onOrAfter(version1)) { value2 = in.readInt(); } @@ -1228,10 +1227,7 @@ static class Version0Response extends TransportResponse { this.value1 = in.readInt(); } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } + @Override public void writeTo(StreamOutput out) throws IOException { @@ -1257,10 
+1253,7 @@ static class Version1Response extends Version0Response { } } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } + @Override public void writeTo(StreamOutput out) throws IOException { @@ -1272,7 +1265,7 @@ public void writeTo(StreamOutput out) throws IOException { } public void testVersionFrom0to1() throws Exception { - serviceB.registerRequestHandler("internal:version", Version1Request::new, ThreadPool.Names.SAME, + serviceB.registerRequestHandler("internal:version", ThreadPool.Names.SAME, Version1Request::new, new TransportRequestHandler() { @Override public void messageReceived(Version1Request request, TransportChannel channel, Task task) throws Exception { @@ -1314,7 +1307,7 @@ public String executor() { } public void testVersionFrom1to0() throws Exception { - serviceA.registerRequestHandler("internal:version", Version0Request::new, ThreadPool.Names.SAME, + serviceA.registerRequestHandler("internal:version", ThreadPool.Names.SAME, Version0Request::new, new TransportRequestHandler() { @Override public void messageReceived(Version0Request request, TransportChannel channel, Task task) throws Exception { @@ -1358,7 +1351,7 @@ public String executor() { } public void testVersionFrom1to1() throws Exception { - serviceB.registerRequestHandler("internal:version", Version1Request::new, ThreadPool.Names.SAME, + serviceB.registerRequestHandler("internal:version", ThreadPool.Names.SAME, Version1Request::new, (request, channel, task) -> { assertThat(request.value1, equalTo(1)); assertThat(request.value2, equalTo(2)); @@ -1400,7 +1393,7 @@ public String executor() { } public void testVersionFrom0to0() throws Exception { - serviceA.registerRequestHandler("internal:version", Version0Request::new, ThreadPool.Names.SAME, + serviceA.registerRequestHandler("internal:version", ThreadPool.Names.SAME, Version0Request::new, (request, channel, task) -> { assertThat(request.value1, equalTo(1)); Version0Response response = new Version0Response(1); @@ -1438,7 +1431,7 @@ public String executor() { } public void testMockFailToSendNoConnectRule() throws Exception { - serviceA.registerRequestHandler("internal:sayHello", StringMessageRequest::new, ThreadPool.Names.GENERIC, + serviceA.registerRequestHandler("internal:sayHello", ThreadPool.Names.GENERIC, StringMessageRequest::new, (request, channel, task) -> { assertThat("moshe", equalTo(request.message)); throw new RuntimeException("bad message !!!"); @@ -1495,7 +1488,7 @@ public void handleException(TransportException exp) { } public void testMockUnresponsiveRule() throws IOException { - serviceA.registerRequestHandler("internal:sayHello", StringMessageRequest::new, ThreadPool.Names.GENERIC, + serviceA.registerRequestHandler("internal:sayHello", ThreadPool.Names.GENERIC, StringMessageRequest::new, (request, channel, task) -> { assertThat("moshe", equalTo(request.message)); throw new RuntimeException("bad message !!!"); @@ -1550,7 +1543,7 @@ public void testHostOnMessages() throws InterruptedException { final CountDownLatch latch = new CountDownLatch(2); final AtomicReference addressA = new AtomicReference<>(); final AtomicReference addressB = new AtomicReference<>(); - serviceB.registerRequestHandler("internal:action1", TestRequest::new, ThreadPool.Names.SAME, (request, channel, task) -> { + serviceB.registerRequestHandler("internal:action1", ThreadPool.Names.SAME, TestRequest::new, (request, channel, task) -> { 
addressA.set(request.remoteAddress()); channel.sendResponse(new TestResponse((String) null)); latch.countDown(); @@ -1590,7 +1583,7 @@ public void testBlockingIncomingRequests() throws Exception { try (TransportService service = buildService("TS_TEST", version0, null, Settings.EMPTY, false, false)) { AtomicBoolean requestProcessed = new AtomicBoolean(false); - service.registerRequestHandler("internal:action", TestRequest::new, ThreadPool.Names.SAME, + service.registerRequestHandler("internal:action", ThreadPool.Names.SAME, TestRequest::new, (request, channel, task) -> { requestProcessed.set(true); channel.sendResponse(TransportResponse.Empty.INSTANCE); @@ -1640,20 +1633,18 @@ public static class TestRequest extends TransportRequest { String info; int resendCount; - public TestRequest() { + public TestRequest() {} + + public TestRequest(StreamInput in) throws IOException { + super(in); + info = in.readOptionalString(); + resendCount = in.readInt(); } public TestRequest(String info) { this.info = info; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - info = in.readOptionalString(); - resendCount = in.readInt(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -1674,6 +1665,7 @@ private static class TestResponse extends TransportResponse { final String info; TestResponse(StreamInput in) throws IOException { + super(in); this.info = in.readOptionalString(); } @@ -1681,11 +1673,6 @@ private static class TestResponse extends TransportResponse { this.info = info; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(info); @@ -1800,11 +1787,11 @@ public String executor() { } } - serviceB.registerRequestHandler("internal:action1", TestRequest::new, randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC), + serviceB.registerRequestHandler("internal:action1", randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC), TestRequest::new, new TestRequestHandler(serviceB)); - serviceC.registerRequestHandler("internal:action1", TestRequest::new, randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC), + serviceC.registerRequestHandler("internal:action1", randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC), TestRequest::new, new TestRequestHandler(serviceC)); - serviceA.registerRequestHandler("internal:action1", TestRequest::new, randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC), + serviceA.registerRequestHandler("internal:action1", randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC), TestRequest::new, new TestRequestHandler(serviceA)); int iters = randomIntBetween(30, 60); CountDownLatch allRequestsDone = new CountDownLatch(iters); @@ -1868,19 +1855,20 @@ public String executor() { } public void testRegisterHandlerTwice() { - serviceB.registerRequestHandler("internal:action1", TestRequest::new, randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC), + serviceB.registerRequestHandler("internal:action1", randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC), TestRequest::new, (request, message, task) -> { throw new AssertionError("boom"); }); expectThrows(IllegalArgumentException.class, () -> - serviceB.registerRequestHandler("internal:action1", TestRequest::new, randomFrom(ThreadPool.Names.SAME, - ThreadPool.Names.GENERIC), + 
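/*
 * The recurring change in these hunks replaces Streamable-style readFrom(StreamInput) mutation with
 * deserialization constructors such as TestRequest(StreamInput in), so fields can be assigned once
 * and no half-initialized instance escapes. A plain-JDK analogue of the pattern, with DataInput
 * standing in for StreamInput; the type names are illustrative, not Elasticsearch classes.
 */
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;

public final class WireConstructorSketch {

    static final class PingRequest {
        final String info;
        final int resendCount;

        PingRequest(final String info, final int resendCount) {
            this.info = info;
            this.resendCount = resendCount;
        }

        // the "Writeable" style: a constructor consumes the stream, mirroring the writeTo field order
        PingRequest(final DataInput in) throws IOException {
            this.info = in.readUTF();
            this.resendCount = in.readInt();
        }

        void writeTo(final DataOutput out) throws IOException {
            out.writeUTF(info);
            out.writeInt(resendCount);
        }
    }

    public static void main(final String[] args) throws IOException {
        final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        new PingRequest("hello", 2).writeTo(new DataOutputStream(bytes));
        final PingRequest copy = new PingRequest(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(copy.info + " " + copy.resendCount); // hello 2
    }
}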
serviceB.registerRequestHandler("internal:action1", randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC), + TestRequest::new, (request, message, task) -> { throw new AssertionError("boom"); }) ); - serviceA.registerRequestHandler("internal:action1", TestRequest::new, randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC), + serviceA.registerRequestHandler("internal:action1", randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC), + TestRequest::new, (request, message, task) -> { throw new AssertionError("boom"); }); @@ -1992,7 +1980,7 @@ public void testKeepAlivePings() throws Exception { public void testTcpHandshake() { assumeTrue("only tcp transport has a handshake method", serviceA.getOriginalTransport() instanceof TcpTransport); - try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + try (MockTransportService service = buildService("TS_BAD", Version.CURRENT, Settings.EMPTY)) { service.addMessageListener(new TransportMessageListener() { @Override public void onRequestReceived(long requestId, String action) { @@ -2083,7 +2071,7 @@ public void run() { public void testResponseHeadersArePreserved() throws InterruptedException { List executors = new ArrayList<>(ThreadPool.THREAD_POOL_TYPES.keySet()); CollectionUtil.timSort(executors); // makes sure it's reproducible - serviceA.registerRequestHandler("internal:action", TestRequest::new, ThreadPool.Names.SAME, + serviceA.registerRequestHandler("internal:action", ThreadPool.Names.SAME, TestRequest::new, (request, channel, task) -> { threadPool.getThreadContext().putTransient("boom", new Object()); @@ -2144,7 +2132,7 @@ public void testHandlerIsInvokedOnConnectionClose() throws IOException, Interrup List executors = new ArrayList<>(ThreadPool.THREAD_POOL_TYPES.keySet()); CollectionUtil.timSort(executors); // makes sure it's reproducible TransportService serviceC = buildService("TS_C", CURRENT_VERSION, Settings.EMPTY); - serviceC.registerRequestHandler("internal:action", TestRequest::new, ThreadPool.Names.SAME, + serviceC.registerRequestHandler("internal:action", ThreadPool.Names.SAME, TestRequest::new, (request, channel, task) -> { // do nothing }); @@ -2204,7 +2192,7 @@ public void testConcurrentDisconnectOnNonPublishedConnection() throws IOExceptio MockTransportService serviceC = buildService("TS_C", version0, Settings.EMPTY); CountDownLatch receivedLatch = new CountDownLatch(1); CountDownLatch sendResponseLatch = new CountDownLatch(1); - serviceC.registerRequestHandler("internal:action", TestRequest::new, ThreadPool.Names.SAME, + serviceC.registerRequestHandler("internal:action", ThreadPool.Names.SAME, TestRequest::new, (request, channel, task) -> { // don't block on a network thread here threadPool.generic().execute(new AbstractRunnable() { @@ -2272,7 +2260,7 @@ public void testTransportStats() throws Exception { MockTransportService serviceC = buildService("TS_C", version0, Settings.EMPTY); CountDownLatch receivedLatch = new CountDownLatch(1); CountDownLatch sendResponseLatch = new CountDownLatch(1); - serviceB.registerRequestHandler("internal:action", TestRequest::new, ThreadPool.Names.SAME, + serviceB.registerRequestHandler("internal:action", ThreadPool.Names.SAME, TestRequest::new, (request, channel, task) -> { // don't block on a network thread here threadPool.generic().execute(new AbstractRunnable() { @@ -2385,7 +2373,7 @@ public void testTransportStatsWithException() throws Exception { CountDownLatch sendResponseLatch = new CountDownLatch(1); 
Exception ex = new RuntimeException("boom"); ex.setStackTrace(new StackTraceElement[0]); - serviceB.registerRequestHandler("internal:action", TestRequest::new, ThreadPool.Names.SAME, + serviceB.registerRequestHandler("internal:action", ThreadPool.Names.SAME, TestRequest::new, (request, channel, task) -> { // don't block on a network thread here threadPool.generic().execute(new AbstractRunnable() { @@ -2654,6 +2642,17 @@ public void testProfilesIncludesDefault() { .toSet())); } + public void testBindUnavailableAddress() { + int port = serviceA.boundAddress().publishAddress().getPort(); + Settings settings = Settings.builder() + .put(Node.NODE_NAME_SETTING.getKey(), "foobar") + .put(TransportSettings.PORT.getKey(), port) + .build(); + BindTransportException bindTransportException = expectThrows(BindTransportException.class, + () -> buildService("test", Version.CURRENT, settings)); + assertEquals("Failed to bind to ["+ port + "]", bindTransportException.getMessage()); + } + public void testChannelCloseWhileConnecting() { try (MockTransportService service = buildService("TS_C", version0, Settings.EMPTY)) { AtomicBoolean connectionClosedListenerCalled = new AtomicBoolean(false); @@ -2701,4 +2700,5 @@ protected InetSocketAddress getLocalEphemeral() throws UnknownHostException { protected Set getAcceptedChannels(TcpTransport transport) { return transport.getAcceptedChannels(); } + } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java index 336e69dcd86f4..800ba8f207448 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java @@ -67,6 +67,7 @@ import java.util.concurrent.TimeUnit; import java.util.function.Consumer; import java.util.function.IntFunction; +import java.util.function.Supplier; import java.util.stream.Collectors; import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; @@ -228,12 +229,25 @@ public MockSocketChannel createChannel(NioSelector selector, SocketChannel chann } @Override - public MockServerChannel createServerChannel(NioSelector selector, ServerSocketChannel channel) throws IOException { + public MockServerChannel createServerChannel(NioSelector selector, ServerSocketChannel channel) { MockServerChannel nioServerChannel = new MockServerChannel(channel); Consumer exceptionHandler = (e) -> logger.error(() -> new ParameterizedMessage("exception from server channel caught on transport layer [{}]", channel), e); - ServerChannelContext context = new ServerChannelContext(nioServerChannel, this, selector, MockNioTransport.this::acceptChannel, - exceptionHandler); + ServerChannelContext context = new ServerChannelContext(nioServerChannel, null, selector, null, + exceptionHandler) { + @Override + public void acceptChannels(Supplier selectorSupplier) throws IOException { + int acceptCount = 0; + NioSocketChannel acceptedChannel; + while ((acceptedChannel = MockTcpChannelFactory.this.acceptNioChannel(this, selectorSupplier)) != null) { + acceptChannel(acceptedChannel); + ++acceptCount; + if (acceptCount % 100 == 0) { + logger.warn("Accepted [{}] connections in a single select loop iteration on [{}]", acceptCount, channel); + } + } + } + }; nioServerChannel.setContext(context); return nioServerChannel; } @@ -362,14 +376,17 @@ private void maybeLogElapsedTime(long startTime) { private void 
logLongRunningExecutions() { for (Map.Entry entry : registry.entrySet()) { - final long elapsedTimeInNanos = threadPool.relativeTimeInNanos() - entry.getValue(); + final Long blockedSinceInNanos = entry.getValue(); + final long elapsedTimeInNanos = threadPool.relativeTimeInNanos() - blockedSinceInNanos; if (elapsedTimeInNanos > warnThreshold) { final Thread thread = entry.getKey(); - logger.warn("Potentially blocked execution on network thread [{}] [{}] [{} milliseconds]: \n{}", - thread.getName(), - thread.getState(), - TimeUnit.NANOSECONDS.toMillis(elapsedTimeInNanos), - Arrays.stream(thread.getStackTrace()).map(Object::toString).collect(Collectors.joining("\n"))); + final String stackTrace = + Arrays.stream(thread.getStackTrace()).map(Object::toString).collect(Collectors.joining("\n")); + final Thread.State threadState = thread.getState(); + if (blockedSinceInNanos == registry.get(thread)) { + logger.warn("Potentially blocked execution on network thread [{}] [{}] [{} milliseconds]: \n{}", + thread.getName(), threadState, TimeUnit.NANOSECONDS.toMillis(elapsedTimeInNanos), stackTrace); + } } } if (stopped == false) { diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/TestEventHandler.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/TestEventHandler.java index 069e19c34558c..4a1c6f5deb6a2 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/TestEventHandler.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/TestEventHandler.java @@ -92,6 +92,30 @@ protected void registrationException(ChannelContext context, Exception except } } + @Override + protected void handleActive(ChannelContext context) throws IOException { + final boolean registered = transportThreadWatchdog.register(); + try { + super.handleActive(context); + } finally { + if (registered) { + transportThreadWatchdog.unregister(); + } + } + } + + @Override + protected void activeException(ChannelContext context, Exception exception) { + final boolean registered = transportThreadWatchdog.register(); + try { + super.activeException(context, exception); + } finally { + if (registered) { + transportThreadWatchdog.unregister(); + } + } + } + public void handleConnect(SocketChannelContext context) throws IOException { assert hasConnectedMap.contains(context) == false : "handleConnect should only be called is a channel is not yet connected"; final boolean registered = transportThreadWatchdog.register(); diff --git a/test/framework/src/test/java/org/elasticsearch/test/disruption/DisruptableMockTransportTests.java b/test/framework/src/test/java/org/elasticsearch/test/disruption/DisruptableMockTransportTests.java index cc85ae0bad3e7..af525a4cdda27 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/disruption/DisruptableMockTransportTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/disruption/DisruptableMockTransportTests.java @@ -256,7 +256,7 @@ public String executor() { } private void registerRequestHandler(TransportService transportService, TransportRequestHandler handler) { - transportService.registerRequestHandler("internal:dummy", () -> TransportRequest.Empty.INSTANCE, ThreadPool.Names.GENERIC, handler); + transportService.registerRequestHandler("internal:dummy", ThreadPool.Names.GENERIC, TransportRequest.Empty::new, handler); } private void send(TransportService transportService, DiscoveryNode destinationNode, diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/ESTestCaseTests.java 
b/test/framework/src/test/java/org/elasticsearch/test/test/ESTestCaseTests.java index af8df361d4cf3..2450b5658beda 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/ESTestCaseTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/ESTestCaseTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.test.test; -import com.carrotsearch.randomizedtesting.RandomizedTest; import junit.framework.AssertionFailedError; import org.elasticsearch.common.bytes.BytesReference; @@ -43,6 +42,7 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.not; public class ESTestCaseTests extends ESTestCase { @@ -185,8 +185,7 @@ public void testRandomValueOtherThan() { public void testWorkerSystemProperty() { assumeTrue("requires running tests with Gradle", System.getProperty("tests.gradle") != null); - // org.gradle.test.worker starts counting at 1 - assertThat(RandomizedTest.systemPropertyAsInt(TEST_WORKER_SYS_PROPERTY, -1), greaterThan(0)); - assertEquals(RandomizedTest.systemPropertyAsInt(TEST_WORKER_SYS_PROPERTY, -1) - 1, TEST_WORKER_VM); + + assertThat(ESTestCase.TEST_WORKER_VM_ID, not(equalTo(ESTestCase.DEFAULT_TEST_WORKER_ID))); } } diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java index 2c031d9e3acff..b886d5727dea0 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java @@ -23,14 +23,20 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.junit.annotations.TestIssueLogging; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.junit.listeners.LoggingListener; import org.junit.runner.Description; import org.junit.runner.Result; +import java.lang.annotation.Annotation; import java.lang.reflect.Method; +import java.util.Objects; +import java.util.stream.Stream; +import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.hasToString; public class LoggingListenerTests extends ESTestCase { @@ -43,9 +49,21 @@ public void testTestRunStartedSupportsClassInDefaultPackage() throws Exception { } public void testCustomLevelPerMethod() throws Exception { + runTestCustomLevelPerMethod(TestClass.class); + } + + public void testIssueCustomLevelPerMethod() throws Exception { + runTestCustomLevelPerMethod(TestIssueClass.class); + } + + public void testMixedCustomLevelPerMethod() throws Exception { + runTestCustomLevelPerMethod(TestMixedClass.class); + } + + private void runTestCustomLevelPerMethod(final Class clazz) throws Exception { LoggingListener loggingListener = new LoggingListener(); - Description suiteDescription = Description.createSuiteDescription(TestClass.class); + Description suiteDescription = Description.createSuiteDescription(clazz); Logger xyzLogger = LogManager.getLogger("xyz"); Logger abcLogger = LogManager.getLogger("abc"); @@ -58,9 +76,11 @@ public void testCustomLevelPerMethod() throws Exception { assertThat(xyzLogger.getLevel(), equalTo(level)); assertThat(abcLogger.getLevel(), equalTo(level)); - Method method = TestClass.class.getMethod("annotatedTestMethod"); - TestLogging annotation = 
method.getAnnotation(TestLogging.class); - Description testDescription = Description.createTestDescription(LoggingListenerTests.class, "annotatedTestMethod", annotation); + Method method = clazz.getMethod("annotatedTestMethod"); + TestLogging testLogging = method.getAnnotation(TestLogging.class); + TestIssueLogging testIssueLogging = method.getAnnotation(TestIssueLogging.class); + Annotation[] annotations = Stream.of(testLogging, testIssueLogging).filter(Objects::nonNull).toArray(Annotation[]::new); + Description testDescription = Description.createTestDescription(LoggingListenerTests.class, "annotatedTestMethod", annotations); loggingListener.testStarted(testDescription); assertThat(xyzLogger.getLevel(), equalTo(Level.TRACE)); assertThat(abcLogger.getLevel(), equalTo(level)); @@ -75,15 +95,29 @@ public void testCustomLevelPerMethod() throws Exception { } public void testCustomLevelPerClass() throws Exception { + runTestCustomLevelPerClass(AnnotatedTestClass.class); + } + + public void testIssueCustomLevelPerClass() throws Exception { + runTestCustomLevelPerClass(AnnotatedTestIssueClass.class); + } + + public void testCustomLevelPerClassMixed() throws Exception { + runTestCustomLevelPerClass(AnnotatedTestMixedClass.class); + } + + private void runTestCustomLevelPerClass(final Class clazz) throws Exception { LoggingListener loggingListener = new LoggingListener(); - Description suiteDescription = Description.createSuiteDescription(AnnotatedTestClass.class); + Description suiteDescription = Description.createSuiteDescription(clazz); Logger abcLogger = LogManager.getLogger("abc"); Logger xyzLogger = LogManager.getLogger("xyz"); - // we include foo and foo.bar to maintain that logging levels are applied from the top of the hierarchy down; this ensures that - // setting the logging level for a parent logger and a child logger applies the parent level first and then the child as otherwise - // setting the parent level would overwrite the child level + /* + * We include foo and foo.bar to maintain that logging levels are applied from the top of the hierarchy down. This ensures that + * setting the logging level for a parent logger and a child logger applies the parent level first and then the child as otherwise + * setting the parent level would overwrite the child level. 
+ */ Logger fooLogger = LogManager.getLogger("foo"); Logger fooBarLogger = LogManager.getLogger("foo.bar"); @@ -120,9 +154,21 @@ public void testCustomLevelPerClass() throws Exception { } public void testCustomLevelPerClassAndPerMethod() throws Exception { + runTestCustomLevelPerClassAndPerMethod(AnnotatedTestClass.class); + } + + public void testIssueCustomLevelPerClassAndPerMethod() throws Exception { + runTestCustomLevelPerClassAndPerMethod(AnnotatedTestIssueClass.class); + } + + public void testCustomLevelPerClassAndPerMethodMixed() throws Exception { + runTestCustomLevelPerClassAndPerMethod(AnnotatedTestMixedClass.class); + } + + private void runTestCustomLevelPerClassAndPerMethod(final Class clazz) throws Exception { LoggingListener loggingListener = new LoggingListener(); - Description suiteDescription = Description.createSuiteDescription(AnnotatedTestClass.class); + Description suiteDescription = Description.createSuiteDescription(clazz); Logger abcLogger = LogManager.getLogger("abc"); Logger xyzLogger = LogManager.getLogger("xyz"); @@ -136,8 +182,10 @@ public void testCustomLevelPerClassAndPerMethod() throws Exception { assertThat(abcLogger.getLevel(), equalTo(Level.WARN)); Method method = TestClass.class.getMethod("annotatedTestMethod"); - TestLogging annotation = method.getAnnotation(TestLogging.class); - Description testDescription = Description.createTestDescription(LoggingListenerTests.class, "annotatedTestMethod", annotation); + TestLogging testLogging = method.getAnnotation(TestLogging.class); + TestIssueLogging testIssueLogging = method.getAnnotation(TestIssueLogging.class); + Annotation[] annotations = Stream.of(testLogging, testIssueLogging).filter(Objects::nonNull).toArray(Annotation[]::new); + Description testDescription = Description.createTestDescription(LoggingListenerTests.class, "annotatedTestMethod", annotations); loggingListener.testStarted(testDescription); assertThat(xyzLogger.getLevel(), equalTo(Level.TRACE)); assertThat(abcLogger.getLevel(), equalTo(Level.WARN)); @@ -147,8 +195,10 @@ public void testCustomLevelPerClassAndPerMethod() throws Exception { assertThat(abcLogger.getLevel(), equalTo(Level.WARN)); Method method2 = TestClass.class.getMethod("annotatedTestMethod2"); - TestLogging annotation2 = method2.getAnnotation(TestLogging.class); - Description testDescription2 = Description.createTestDescription(LoggingListenerTests.class, "annotatedTestMethod2", annotation2); + TestLogging testLogging2 = method2.getAnnotation(TestLogging.class); + TestIssueLogging testIssueLogging2 = method2.getAnnotation(TestIssueLogging.class); + Annotation[] annotations2 = Stream.of(testLogging2, testIssueLogging2).filter(Objects::nonNull).toArray(Annotation[]::new); + Description testDescription2 = Description.createTestDescription(LoggingListenerTests.class, "annotatedTestMethod2", annotations2); loggingListener.testStarted(testDescription2); assertThat(xyzLogger.getLevel(), equalTo(Level.DEBUG)); assertThat(abcLogger.getLevel(), equalTo(Level.TRACE)); @@ -163,9 +213,17 @@ public void testCustomLevelPerClassAndPerMethod() throws Exception { } public void testInvalidClassTestLoggingAnnotation() throws Exception { + runTestInvalidClassTestLoggingAnnotation(InvalidClass.class); + } + + public void testInvalidClassTestIssueLoggingAnnotation() throws Exception { + runTestInvalidClassTestLoggingAnnotation(InvalidIssueClass.class); + } + + private void runTestInvalidClassTestLoggingAnnotation(final Class clazz) { final LoggingListener loggingListener = new LoggingListener(); - 
final Description suiteDescription = Description.createSuiteDescription(InvalidClass.class); + final Description suiteDescription = Description.createSuiteDescription(clazz); final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> loggingListener.testRunStarted(suiteDescription)); @@ -173,28 +231,65 @@ public void testInvalidClassTestLoggingAnnotation() throws Exception { } public void testInvalidMethodTestLoggingAnnotation() throws Exception { + runTestInvalidMethodTestLoggingAnnotation(InvalidTestLoggingMethod.class); + } + + public void testInvalidMethodTestIssueLoggingAnnotation() throws Exception { + runTestInvalidMethodTestLoggingAnnotation(InvalidTestIssueLoggingMethod.class); + } + + private void runTestInvalidMethodTestLoggingAnnotation(final Class clazz) throws Exception { final LoggingListener loggingListener = new LoggingListener(); - final Description suiteDescription = Description.createSuiteDescription(InvalidMethod.class); + final Description suiteDescription = Description.createSuiteDescription(clazz); loggingListener.testRunStarted(suiteDescription); - final Method method = InvalidMethod.class.getMethod("invalidMethod"); - final TestLogging annotation = method.getAnnotation(TestLogging.class); - Description testDescription = Description.createTestDescription(InvalidMethod.class, "invalidMethod", annotation); + final Method method = clazz.getMethod("invalidMethod"); + final TestLogging testLogging = method.getAnnotation(TestLogging.class); + final TestIssueLogging testIssueLogging = method.getAnnotation(TestIssueLogging.class); + final Annotation[] annotations = Stream.of(testLogging, testIssueLogging).filter(Objects::nonNull).toArray(Annotation[]::new); + Description testDescription = Description.createTestDescription(clazz, "invalidMethod", annotations); final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> loggingListener.testStarted(testDescription)); assertThat(e.getMessage(), equalTo("invalid test logging annotation [abc:INFO:WARN]")); } + public void testDuplicateLoggerBetweenTestLoggingAndTestIssueLogging() throws Exception { + final LoggingListener loggingListener = new LoggingListener(); + + final Description suiteDescription = Description.createSuiteDescription(DuplicateLoggerBetweenTestLoggingAndTestIssueLogging.class); + + final IllegalArgumentException e = + expectThrows(IllegalArgumentException.class, () -> loggingListener.testRunStarted(suiteDescription)); + assertThat(e, hasToString(containsString("found intersection [abc] between TestLogging and TestIssueLogging"))); + } + /** * Dummy class used to create a JUnit suite description that has the {@link TestLogging} annotation. */ - @TestLogging("abc:WARN,foo:WARN,foo.bar:ERROR") + @TestLogging(value = "abc:WARN,foo:WARN,foo.bar:ERROR", reason = "testing TestLogging class annotations") public static class AnnotatedTestClass { } + /** + * Dummy class used to create a JUnit suite description that has the {@link TestIssueLogging} annotation. + */ + @TestIssueLogging(value = "abc:WARN,foo:WARN,foo.bar:ERROR", issueUrl = "https://example.com") + public static class AnnotatedTestIssueClass { + + } + + /** + * Dummy class used to create a JUnit suite description that has the {@link TestLogging} and {@link TestIssueLogging} annotations. 
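/*
 * The dummy classes below exercise TestIssueLogging, which pairs a "logger:level" value with a
 * mandatory issueUrl, in contrast to TestLogging's mandatory reason. A hedged sketch of how such an
 * annotation can be declared so JUnit Descriptions can carry it at runtime; the retention and target
 * choices here are assumptions for illustration, not copied from the PR.
 */
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

@Retention(RetentionPolicy.RUNTIME) // must survive to runtime so LoggingListener can read it reflectively
@Target({ElementType.TYPE, ElementType.METHOD})
@interface TestIssueLoggingSketch {
    String value();    // comma-separated "logger:level" pairs, e.g. "foo:WARN,foo.bar:ERROR"
    String issueUrl(); // the tracking issue this extra logging is gathering data for
}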
+ */ + @TestLogging(value = "abc:WARN,foo:WARN", reason = "testing TestLogging class annotations") + @TestIssueLogging(value = "foo.bar:ERROR", issueUrl = "https://example.com") + public static class AnnotatedTestMixedClass { + + } + /** * Dummy class used to create a JUnit suite description that doesn't have the {@link TestLogging} annotation, but its test methods have * it. @@ -202,13 +297,55 @@ public static class AnnotatedTestClass { public static class TestClass { @SuppressWarnings("unused") - @TestLogging("xyz:TRACE,foo:WARN,foo.bar:ERROR") + @TestLogging(value = "xyz:TRACE,foo:WARN,foo.bar:ERROR", reason = "testing TestLogging method annotations") + public void annotatedTestMethod() { + + } + + @SuppressWarnings("unused") + @TestLogging(value = "abc:TRACE,xyz:DEBUG", reason = "testing TestLogging method annotations") + public void annotatedTestMethod2() { + + } + + } + + /** + * Dummy class used to create a JUnit suite description that doesn't have the {@link TestIssueLogging} annotation, but its test methods + * have it. + */ + public static class TestIssueClass { + + @SuppressWarnings("unused") + @TestIssueLogging(value = "xyz:TRACE,foo:WARN,foo.bar:ERROR", issueUrl = "https://example.com") public void annotatedTestMethod() { } @SuppressWarnings("unused") - @TestLogging("abc:TRACE,xyz:DEBUG") + @TestIssueLogging(value = "abc:TRACE,xyz:DEBUG", issueUrl = "https://example.com") + public void annotatedTestMethod2() { + + } + + } + + /** + * Dummy class used to create a JUnit suite description that doesn't have the {@link TestLogging} annotation nor + * {@link TestIssueLogging}, but its test methods have both. + */ + public static class TestMixedClass { + + @SuppressWarnings("unused") + @TestLogging(value = "xyz:TRACE,foo:WARN", reason = "testing TestLogging method annotations") + @TestIssueLogging(value = "foo.bar:ERROR", issueUrl = "https://example.com") + public void annotatedTestMethod() { + + } + + @SuppressWarnings("unused") + @TestLogging(value = "abc:TRACE", reason = "testing TestLogging method annotations") + @TestIssueLogging(value = "xyz:DEBUG", issueUrl = "https://example.com") public void annotatedTestMethod2() { } @@ -218,22 +355,52 @@ public void annotatedTestMethod2() { /** * Dummy class with an invalid {@link TestLogging} annotation. */ - @TestLogging("abc") + @TestLogging(value = "abc", reason = "testing an invalid TestLogging class annotation") public static class InvalidClass { } + /** + * Dummy class with an invalid {@link TestIssueLogging} annotation. + */ + @TestIssueLogging(value = "abc", issueUrl = "https://example.com") + public static class InvalidIssueClass { + + } + /** * Dummy class with an invalid {@link TestLogging} annotation on a method. */ - public static class InvalidMethod { + public static class InvalidTestLoggingMethod { + @SuppressWarnings("unused") - @TestLogging("abc:INFO:WARN") + @TestLogging(value = "abc:INFO:WARN", reason = "testing an invalid TestLogging method annotation") public void invalidMethod() { } } + /** + * Dummy class with an invalid {@link TestIssueLogging} annotation on a method. + */ + public static class InvalidTestIssueLoggingMethod { + + @SuppressWarnings("unused") + @TestIssueLogging(value = "abc:INFO:WARN", issueUrl = "https://example.com") + public void invalidMethod() { + + } + + } + + /** + * Dummy class with duplicate logging levels between {@link TestLogging} and {@link TestIssueLogging} annotations. 
+ */ + @TestLogging(value = "abc:INFO", reason = "testing a duplicate logger level between TestLogging and TestIssueLogging annotations") + @TestIssueLogging(value = "abc:DEBUG", issueUrl = "https://example.com") + public static class DuplicateLoggerBetweenTestLoggingAndTestIssueLogging { + + } + } diff --git a/test/framework/src/test/java/org/elasticsearch/test/transport/MockTransportServiceTests.java b/test/framework/src/test/java/org/elasticsearch/test/transport/MockTransportServiceTests.java new file mode 100644 index 0000000000000..5bb074e4b59b5 --- /dev/null +++ b/test/framework/src/test/java/org/elasticsearch/test/transport/MockTransportServiceTests.java @@ -0,0 +1,36 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.test.transport; + +import org.elasticsearch.test.ESTestCase; + +public class MockTransportServiceTests extends ESTestCase { + + public void testBasePortGradle() { + assumeTrue("requires running tests with Gradle", System.getProperty("tests.gradle") != null); + // Gradle worker IDs are 1 based + assertNotEquals(10300, MockTransportService.getBasePort()); + } + + public void testBasePortIDE() { + assumeTrue("requires running tests without Gradle", System.getProperty("tests.gradle") == null); + assertEquals(10300, MockTransportService.getBasePort()); + } + +} diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/SimpleMockNioTransportTests.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/SimpleMockNioTransportTests.java index 344701b7b43b5..2ba2dadb8cc92 100644 --- a/test/framework/src/test/java/org/elasticsearch/transport/nio/SimpleMockNioTransportTests.java +++ b/test/framework/src/test/java/org/elasticsearch/transport/nio/SimpleMockNioTransportTests.java @@ -29,16 +29,11 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.MockPageCacheRecycler; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; -import org.elasticsearch.node.Node; -import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.AbstractSimpleTransportTestCase; -import org.elasticsearch.transport.BindTransportException; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.ConnectionProfile; import org.elasticsearch.transport.TcpChannel; import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportSettings; import java.io.IOException; import java.net.InetAddress; @@ -52,11 +47,11 @@ public class SimpleMockNioTransportTests extends AbstractSimpleTransportTestCase { - public static MockTransportService nioFromThreadPool(Settings settings, ThreadPool threadPool, final Version version, 
- ClusterSettings clusterSettings, boolean doHandshake) { + @Override + protected Transport build(Settings settings, final Version version, ClusterSettings clusterSettings, boolean doHandshake) { NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); NetworkService networkService = new NetworkService(Collections.emptyList()); - Transport transport = new MockNioTransport(settings, version, threadPool, networkService, new MockPageCacheRecycler(settings), + return new MockNioTransport(settings, version, threadPool, networkService, new MockPageCacheRecycler(settings), namedWriteableRegistry, new NoneCircuitBreakerService()) { @Override @@ -69,20 +64,6 @@ public void executeHandshake(DiscoveryNode node, TcpChannel channel, ConnectionP } } }; - MockTransportService mockTransportService = - MockTransportService.createNewService(settings, transport, version, threadPool, clusterSettings, Collections.emptySet()); - mockTransportService.start(); - return mockTransportService; - } - - @Override - protected MockTransportService build(Settings settings, Version version, ClusterSettings clusterSettings, boolean doHandshake) { - settings = Settings.builder().put(settings) - .put(TransportSettings.PORT.getKey(), "0") - .build(); - MockTransportService transportService = nioFromThreadPool(settings, threadPool, version, clusterSettings, doHandshake); - transportService.start(); - return transportService; } @Override @@ -102,24 +83,4 @@ public void testConnectException() throws UnknownHostException { assertThat(cause, instanceOf(IOException.class)); } } - - public void testBindUnavailableAddress() { - // this is on a lower level since it needs access to the TransportService before it's started - int port = serviceA.boundAddress().publishAddress().getPort(); - Settings settings = Settings.builder() - .put(Node.NODE_NAME_SETTING.getKey(), "foobar") - .put(TransportSettings.PORT.getKey(), port) - .build(); - ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - BindTransportException bindTransportException = expectThrows(BindTransportException.class, () -> { - MockTransportService transportService = nioFromThreadPool(settings, threadPool, Version.CURRENT, clusterSettings, true); - try { - transportService.start(); - } finally { - transportService.stop(); - transportService.close(); - } - }); - assertEquals("Failed to bind to ["+ port + "]", bindTransportException.getMessage()); - } } diff --git a/test/logger-usage/src/main/java/org/elasticsearch/test/loggerusage/ESLoggerUsageChecker.java b/test/logger-usage/src/main/java/org/elasticsearch/test/loggerusage/ESLoggerUsageChecker.java index b8404ea76b6a2..35d2b5803cfe8 100644 --- a/test/logger-usage/src/main/java/org/elasticsearch/test/loggerusage/ESLoggerUsageChecker.java +++ b/test/logger-usage/src/main/java/org/elasticsearch/test/loggerusage/ESLoggerUsageChecker.java @@ -52,6 +52,7 @@ import java.nio.file.attribute.BasicFileAttributes; import java.util.Arrays; import java.util.List; +import java.util.Set; import java.util.function.Consumer; import java.util.function.Predicate; @@ -60,13 +61,21 @@ public class ESLoggerUsageChecker { public static final Type THROWABLE_CLASS = Type.getType(Throwable.class); public static final Type STRING_CLASS = Type.getType(String.class); public static final Type STRING_ARRAY_CLASS = Type.getType(String[].class); - public static final Type PARAMETERIZED_MESSAGE_CLASS = Type.getType(ParameterizedMessage.class); + public static 
final Type OBJECT_CLASS = Type.getType(Object.class); public static final Type OBJECT_ARRAY_CLASS = Type.getType(Object[].class); public static final Type SUPPLIER_ARRAY_CLASS = Type.getType(Supplier[].class); public static final Type MARKER_CLASS = Type.getType(Marker.class); public static final List LOGGER_METHODS = Arrays.asList("trace", "debug", "info", "warn", "error", "fatal"); public static final String IGNORE_CHECKS_ANNOTATION = "org.elasticsearch.common.SuppressLoggerChecks"; + // types which are subject to checking when used in a logger. TestMessage is also declared here to + // make sure this functionality works + public static final Set DEPRECATED_TYPES = Set.of( + Type.getObjectType("org/elasticsearch/common/logging/DeprecatedMessage"), + Type.getObjectType("org/elasticsearch/test/loggerusage/TestMessage") + ); + + public static final Type PARAMETERIZED_MESSAGE_CLASS = Type.getType(ParameterizedMessage.class); @SuppressForbidden(reason = "command line tool") public static void main(String... args) throws Exception { @@ -290,7 +299,17 @@ public void findBadLoggerUsages(MethodNode methodNode) { } } else if (insn.getOpcode() == Opcodes.INVOKESPECIAL) { // constructor invocation MethodInsnNode methodInsn = (MethodInsnNode) insn; - if (Type.getObjectType(methodInsn.owner).equals(PARAMETERIZED_MESSAGE_CLASS)) { + Type objectType = Type.getObjectType(methodInsn.owner); + + if (DEPRECATED_TYPES.contains(objectType)) { + Type[] argumentTypes = Type.getArgumentTypes(methodInsn.desc); + if (argumentTypes.length == 3 && + argumentTypes[0].equals(STRING_CLASS) && + argumentTypes[1].equals(STRING_CLASS) && + argumentTypes[2].equals(OBJECT_ARRAY_CLASS)) { + checkArrayArgs(methodNode, logMessageFrames[i], arraySizeFrames[i], lineNumber, methodInsn, 0, 2); + } + } else if (objectType.equals(PARAMETERIZED_MESSAGE_CLASS)) { Type[] argumentTypes = Type.getArgumentTypes(methodInsn.desc); if (argumentTypes.length == 2 && argumentTypes[0].equals(STRING_CLASS) && @@ -316,8 +335,10 @@ public void findBadLoggerUsages(MethodNode methodNode) { argumentTypes[2].equals(THROWABLE_CLASS)) { checkArrayArgs(methodNode, logMessageFrames[i], arraySizeFrames[i], lineNumber, methodInsn, 0, 1); } else { - throw new IllegalStateException("Constructor invoked on " + PARAMETERIZED_MESSAGE_CLASS.getClassName() + - " that is not supported by logger usage checker"); + throw new IllegalStateException("Constructor invoked on " + objectType + + " that is not supported by logger usage checker" + + new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber, + "Constructor: " + Arrays.toString(argumentTypes))); + } } } diff --git a/test/logger-usage/src/test/java/org/elasticsearch/test/loggerusage/ESLoggerUsageTests.java b/test/logger-usage/src/test/java/org/elasticsearch/test/loggerusage/ESLoggerUsageTests.java index ea60b0cf5b7ca..9221a16840de0 100644 --- a/test/logger-usage/src/test/java/org/elasticsearch/test/loggerusage/ESLoggerUsageTests.java +++ b/test/logger-usage/src/test/java/org/elasticsearch/test/loggerusage/ESLoggerUsageTests.java @@ -116,6 +116,20 @@ public void testLoggerUsageCheckerCompatibilityWithLog4j2Logger() throws NoSuchM assertEquals(5, ParameterizedMessage.class.getConstructors().length); } + public void checkForSubclasses() { + logger.debug(new TestMessage("message", "x-opaque-id")); + } + + public void checkArraySizeForSubclasses() { + logger.debug(new TestMessage("message {}", "x-opaque-id", 1)); + } + public void checkFailArraySizeForSubclasses(Object... arr) { + logger.debug(new TestMessage("message {}", "x-opaque-id", arr)); + } + + public void checkFailArraySize(String... arr) { + logger.debug(new ParameterizedMessage("text {}", (Object[]) arr)); + } public void checkNumberOfArguments1() { logger.info("Hello {}", "world"); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/liveness/LivenessRequest.java b/test/logger-usage/src/test/java/org/elasticsearch/test/loggerusage/TestMessage.java similarity index 61% rename from server/src/main/java/org/elasticsearch/action/admin/cluster/node/liveness/LivenessRequest.java rename to test/logger-usage/src/test/java/org/elasticsearch/test/loggerusage/TestMessage.java index d6441bb8e77f5..fa25d506ba4dc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/liveness/LivenessRequest.java +++ b/test/logger-usage/src/test/java/org/elasticsearch/test/loggerusage/TestMessage.java @@ -16,18 +16,18 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.action.admin.cluster.node.liveness; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestValidationException; +package org.elasticsearch.test.loggerusage; + +import org.apache.logging.log4j.message.ParameterizedMessage; /** - * Transport level private response for the transport handler registered under - * {@value org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction#NAME} + * This class is for testing that ESLoggerUsageChecker can find incorrect usages of LogMessages + * which are subclasses of ParameterizedMessage + * @see ESLoggerUsageTests */ -public final class LivenessRequest extends ActionRequest { - @Override - public ActionRequestValidationException validate() { - return null; +class TestMessage extends ParameterizedMessage { + TestMessage(String messagePattern, String xOpaqueId, Object... args) { + super(messagePattern, args); } } diff --git a/x-pack/docs/en/rest-api/security/get-builtin-privileges.asciidoc b/x-pack/docs/en/rest-api/security/get-builtin-privileges.asciidoc index 0ace996f96d71..211ffefa3a069 100644 --- a/x-pack/docs/en/rest-api/security/get-builtin-privileges.asciidoc +++ b/x-pack/docs/en/rest-api/security/get-builtin-privileges.asciidoc @@ -72,6 +72,7 @@ A successful call returns an object with "cluster" and "index" fields. "manage_rollup", "manage_saml", "manage_security", + "manage_slm", "manage_token", "manage_watcher", "monitor", @@ -82,6 +83,7 @@ A successful call returns an object with "cluster" and "index" fields.
"none", "read_ccr", "read_ilm", + "read_slm", "transport_client" ], "index" : [ diff --git a/x-pack/docs/en/security/auditing/index.asciidoc b/x-pack/docs/en/security/auditing/index.asciidoc index ba79779629a44..8e708efaffca9 100644 --- a/x-pack/docs/en/security/auditing/index.asciidoc +++ b/x-pack/docs/en/security/auditing/index.asciidoc @@ -1,12 +1,7 @@ - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/auditing/overview.asciidoc include::overview.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/auditing/event-types.asciidoc include::event-types.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/auditing/output-logfile.asciidoc include::output-logfile.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/auditing/auditing-search-queries.asciidoc include::auditing-search-queries.asciidoc[] diff --git a/x-pack/docs/en/security/ccs-clients-integrations.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations.asciidoc index e0de25d44ef1c..31f6e22316d16 100644 --- a/x-pack/docs/en/security/ccs-clients-integrations.asciidoc +++ b/x-pack/docs/en/security/ccs-clients-integrations.asciidoc @@ -31,17 +31,12 @@ be secured as well, or at least communicate with the cluster in a secured way: * {kibana-ref}/secure-reporting.html[Reporting] * {winlogbeat-ref}/securing-beats.html[Winlogbeat] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc include::ccs-clients-integrations/cross-cluster.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/ccs-clients-integrations/http.asciidoc include::ccs-clients-integrations/http.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/ccs-clients-integrations/hadoop.asciidoc include::ccs-clients-integrations/hadoop.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/ccs-clients-integrations/beats.asciidoc include::ccs-clients-integrations/beats.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/ccs-clients-integrations/monitoring.asciidoc include::ccs-clients-integrations/monitoring.asciidoc[] diff --git a/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc index 6912da6416e74..d917fc2fe850a 100644 --- a/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc +++ b/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc @@ -156,5 +156,4 @@ GET two:logs-2017.04/_search <1> // TEST[skip:todo] //TBD: Is there a missing description of the <1> callout above? 
-:edit_url: https://github.com/elastic/kibana/edit/{branch}/docs/security/cross-cluster-kibana.asciidoc include::{kib-repo-dir}/security/cross-cluster-kibana.asciidoc[] diff --git a/x-pack/docs/en/security/configuring-es.asciidoc b/x-pack/docs/en/security/configuring-es.asciidoc index 94a295aa5640f..746880361a172 100644 --- a/x-pack/docs/en/security/configuring-es.asciidoc +++ b/x-pack/docs/en/security/configuring-es.asciidoc @@ -139,16 +139,12 @@ Events are logged to a dedicated `_audit.json` file in To walk through the configuration of {security-features} in {es}, {kib}, {ls}, and {metricbeat}, see {stack-ov}/security-getting-started.html[Getting started with security]. -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/security/securing-communications/securing-elasticsearch.asciidoc include::{es-repo-dir}/security/securing-communications/securing-elasticsearch.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/security/securing-communications/configuring-tls-docker.asciidoc include::{es-repo-dir}/security/securing-communications/configuring-tls-docker.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/security/securing-communications/enabling-cipher-suites.asciidoc include::{es-repo-dir}/security/securing-communications/enabling-cipher-suites.asciidoc[] -:edit_url: include::authentication/configuring-active-directory-realm.asciidoc[] include::authentication/configuring-file-realm.asciidoc[] include::authentication/configuring-ldap-realm.asciidoc[] @@ -156,10 +152,8 @@ include::authentication/configuring-native-realm.asciidoc[] include::authentication/configuring-pki-realm.asciidoc[] include::authentication/configuring-saml-realm.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/configuring-kerberos-realm.asciidoc include::authentication/configuring-kerberos-realm.asciidoc[] -:edit_url: -include::fips-140-compliance.asciidoc[] include::{es-repo-dir}/security/reference/files.asciidoc[] +include::fips-140-compliance.asciidoc[] diff --git a/x-pack/docs/en/security/fips-140-compliance.asciidoc b/x-pack/docs/en/security/fips-140-compliance.asciidoc index 8c11503b3d3fb..14964fb648ca0 100644 --- a/x-pack/docs/en/security/fips-140-compliance.asciidoc +++ b/x-pack/docs/en/security/fips-140-compliance.asciidoc @@ -119,9 +119,6 @@ features are not available while running in fips mode. The list is as follows: enabled JVM (pointing `JAVA_HOME` environment variable to a different java installation) in order to generate the keys and certificates that can be later used in the FIPS 140-2 enabled JVM. -* The `elasticsearch-plugin` tool. Accordingly, `elasticsearch-plugin` can be - used with a different (non FIPS 140-2 enabled) Java installation if - available. * The SQL CLI client cannot run in a FIPS 140-2 enabled JVM while using TLS for transport security or PKI for client authentication. * The SAML Realm cannot decrypt and consume encrypted Assertions or encrypted diff --git a/x-pack/docs/en/security/securing-communications.asciidoc b/x-pack/docs/en/security/securing-communications.asciidoc index 3bb9793477b98..dc8375c109ce2 100644 --- a/x-pack/docs/en/security/securing-communications.asciidoc +++ b/x-pack/docs/en/security/securing-communications.asciidoc @@ -17,7 +17,6 @@ This section shows how to: The authentication of new nodes helps prevent a rogue node from joining the cluster and receiving data through replication. 
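Related to the FIPS 140-2 notes above: whether a given tool runs under a FIPS 140-2 enabled JVM comes down to that JVM's configured security providers. As an illustrative sketch only (the provider expectation is an assumption; a FIPS-configured JVM would typically list an approved provider such as BCFIPS first), plain Java can enumerate them:

```java
import java.security.Provider;
import java.security.Security;

public class SecurityProviderList {
    public static void main(String[] args) {
        // Print the installed JCA providers in priority order; this is one way to
        // verify which JVM configuration (FIPS or not) a CLI tool is actually using.
        for (Provider provider : Security.getProviders()) {
            System.out.println(provider.getName() + ": " + provider.getInfo());
        }
    }
}
```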
-:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/security/securing-communications/setting-up-ssl.asciidoc include::{es-repo-dir}/security/securing-communications/setting-up-ssl.asciidoc[] [[ciphers]] diff --git a/x-pack/docs/en/watcher/actions.asciidoc b/x-pack/docs/en/watcher/actions.asciidoc index 7f5cd3218b5d3..226bddfdcaa8f 100644 --- a/x-pack/docs/en/watcher/actions.asciidoc +++ b/x-pack/docs/en/watcher/actions.asciidoc @@ -302,25 +302,18 @@ PUT _watcher/watch/log_event_watch <1> A `condition` that only applies to the `notify_pager` action, which restricts its execution to when the condition succeeds (at least 5 hits in this case). -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions/email.asciidoc include::actions/email.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions/webhook.asciidoc include::actions/webhook.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions/index.asciidoc include::actions/index.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions/logging.asciidoc include::actions/logging.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions/slack.asciidoc include::actions/slack.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions/pagerduty.asciidoc include::actions/pagerduty.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions/jira.asciidoc include::actions/jira.asciidoc[] [float] diff --git a/x-pack/docs/en/watcher/condition.asciidoc b/x-pack/docs/en/watcher/condition.asciidoc index 01f55f9b6682a..1935b4dc31d73 100644 --- a/x-pack/docs/en/watcher/condition.asciidoc +++ b/x-pack/docs/en/watcher/condition.asciidoc @@ -28,17 +28,12 @@ conditions are met. In addition to the watch wide condition, you can also configure conditions per <>. -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/condition/always.asciidoc include::condition/always.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/condition/never.asciidoc include::condition/never.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/condition/compare.asciidoc include::condition/compare.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/condition/array-compare.asciidoc include::condition/array-compare.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/condition/script.asciidoc include::condition/script.asciidoc[] diff --git a/x-pack/docs/en/watcher/customizing-watches.asciidoc b/x-pack/docs/en/watcher/customizing-watches.asciidoc index 895e9a89cc015..dac78450e6a44 100644 --- a/x-pack/docs/en/watcher/customizing-watches.asciidoc +++ b/x-pack/docs/en/watcher/customizing-watches.asciidoc @@ -47,7 +47,7 @@ You can use the `search` input to load Elasticsearch search results as the watch initial payload. A <> input contains a `request` object that specifies the -indices you want to search, the {ref}/search-request-search-type.html[search type], +indices you want to search, the {ref}/search-request-body.html#request-body-search-search-type[search type], and the search request body. 
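Concretely, such a `request` object (indices, search type, and body) can be assembled programmatically with `XContentBuilder`; the sketch below is illustrative only, and the index pattern and query are made up:

```java
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

public class SearchInputRequestSketch {
    public static void main(String[] args) throws Exception {
        // The three parts of a search input's "request" object:
        // the indices to search, the search type, and the search request body.
        XContentBuilder request = XContentFactory.jsonBuilder()
            .startObject()
                .array("indices", "logs-*")
                .field("search_type", "query_then_fetch")
                .startObject("body")
                    .startObject("query")
                        .startObject("match")
                            .field("message", "error")
                        .endObject()
                    .endObject()
                .endObject()
            .endObject();
        System.out.println(Strings.toString(request));
    }
}
```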
The `body` field of a search input is the same as the body of an Elasticsearch `_search` request, making the full Elasticsearch Query DSL available for you to use. diff --git a/x-pack/docs/en/watcher/example-watches.asciidoc b/x-pack/docs/en/watcher/example-watches.asciidoc index 2a402b20261d7..2d747caba5cc4 100644 --- a/x-pack/docs/en/watcher/example-watches.asciidoc +++ b/x-pack/docs/en/watcher/example-watches.asciidoc @@ -9,8 +9,6 @@ For more example watches you can use as a starting point for building custom watches, see the https://github.com/elastic/examples/tree/master/Alerting[Example Watches] in the Elastic Examples repo. -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/example-watches/example-watch-clusterstatus.asciidoc include::example-watches/example-watch-clusterstatus.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/example-watches/example-watch-meetupdata.asciidoc include::example-watches/example-watch-meetupdata.asciidoc[] diff --git a/x-pack/docs/en/watcher/gs-index.asciidoc b/x-pack/docs/en/watcher/gs-index.asciidoc index e799adec40a34..c26789ca423a6 100644 --- a/x-pack/docs/en/watcher/gs-index.asciidoc +++ b/x-pack/docs/en/watcher/gs-index.asciidoc @@ -37,10 +37,10 @@ All of these use-cases share a few key properties: 3rd party system is notified, or the query results are stored. [float] -=== How Watches Work +=== How watches work -{xpack} provides an API for creating, managing and testing _watches_. A watch -describes a single alert and can contain multiple notification actions. +The {alert-features} provide an API for creating, managing and testing _watches_. +A watch describes a single alert and can contain multiple notification actions. A watch is constructed from four simple building blocks: diff --git a/x-pack/docs/en/watcher/index.asciidoc b/x-pack/docs/en/watcher/index.asciidoc index 782f0886affc2..0351d04c56b8c 100644 --- a/x-pack/docs/en/watcher/index.asciidoc +++ b/x-pack/docs/en/watcher/index.asciidoc @@ -3,7 +3,7 @@ [partintro] -- -{xpack} alerting is a set of administrative features that enable you to watch +The {alert-features} enable you to watch for changes or anomalies in your data and perform the necessary actions in response. For example, you might want to: @@ -38,10 +38,10 @@ All of these use-cases share a few key properties: 3rd party system is notified, or the query results are stored. [float] -=== How Watches Work +=== How watches work -{xpack} provides an API for creating, managing and testing _watches_. A watch -describes a single alert and can contain multiple notification actions. +The {alert-features} provide an API for creating, managing and testing _watches_. +A watch describes a single alert and can contain multiple notification actions. A watch is constructed from four simple building blocks: @@ -65,32 +65,26 @@ from the query, whether the condition was met, and what actions were taken. 
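To make the four building blocks concrete, the hedged sketch below registers a minimal watch containing one block of each kind via the low-level REST client; the watch id, interval, payload, and action are invented for illustration:

```java
import org.apache.http.HttpHost;

import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

public class MinimalWatchSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request put = new Request("PUT", "/_watcher/watch/minimal_watch");
            // Trigger, input, condition, and actions: one block of each kind.
            put.setJsonEntity(
                "{"
                    + "\"trigger\": { \"schedule\": { \"interval\": \"10m\" } },"
                    + "\"input\": { \"simple\": { \"greeting\": \"hello\" } },"
                    + "\"condition\": { \"always\": {} },"
                    + "\"actions\": { \"log\": { \"logging\": { \"text\": \"watch fired\" } } }"
                    + "}");
            System.out.println(client.performRequest(put).getStatusLine());
        }
    }
}
```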
-- -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/getting-started.asciidoc include::getting-started.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/how-watcher-works.asciidoc include::how-watcher-works.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/encrypting-data.asciidoc include::encrypting-data.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/input.asciidoc include::input.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/trigger.asciidoc include::trigger.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/condition.asciidoc include::condition.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions.asciidoc include::actions.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/transform.asciidoc include::transform.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/managing-watches.asciidoc include::managing-watches.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/example-watches.asciidoc include::example-watches.asciidoc[] + +include::troubleshooting.asciidoc[] + +include::limitations.asciidoc[] diff --git a/x-pack/docs/en/watcher/input.asciidoc b/x-pack/docs/en/watcher/input.asciidoc index 6dee849c735f9..d74f5cd80f1eb 100644 --- a/x-pack/docs/en/watcher/input.asciidoc +++ b/x-pack/docs/en/watcher/input.asciidoc @@ -19,14 +19,10 @@ execution context. NOTE: If you don't define an input for a watch, an empty payload is loaded into the execution context. -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/input/simple.asciidoc include::input/simple.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/input/search.asciidoc include::input/search.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/input/http.asciidoc include::input/http.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/input/chain.asciidoc include::input/chain.asciidoc[] diff --git a/x-pack/docs/en/watcher/input/search.asciidoc b/x-pack/docs/en/watcher/input/search.asciidoc index d4548a159a640..1d21de7b8c1ea 100644 --- a/x-pack/docs/en/watcher/input/search.asciidoc +++ b/x-pack/docs/en/watcher/input/search.asciidoc @@ -9,7 +9,7 @@ supported attributes. 
In the search input's `request` object, you specify: * The indices you want to search -* The {ref}/search-request-search-type.html[search type] +* The {ref}/search-request-body.html#request-body-search-search-type[search type] * The search request body The search request body supports the full Elasticsearch Query DSL--it's the diff --git a/x-pack/docs/en/watcher/limitations.asciidoc b/x-pack/docs/en/watcher/limitations.asciidoc new file mode 100644 index 0000000000000..6b5c47886a403 --- /dev/null +++ b/x-pack/docs/en/watcher/limitations.asciidoc @@ -0,0 +1,24 @@ +[role="xpack"] +[[watcher-limitations]] +== Watcher limitations +[subs="attributes"] +++++ +Limitations +++++ + +[discrete] +=== Watches are not updated when file-based scripts change + +When you refer to a file script in a watch, the watch itself is not updated +if you change the script on the filesystem. + +Currently, the only way to reload a file script in a watch is to delete +the watch and recreate it. + +[discrete] +=== Security integration + +When the {security-features} are enabled, a watch stores information about what +the user who stored the watch is allowed to execute **at that time**. This means that +if those permissions change over time, the watch will still be able to execute +with the permissions that existed when the watch was created. diff --git a/x-pack/docs/en/watcher/transform.asciidoc b/x-pack/docs/en/watcher/transform.asciidoc index 8241d7b0cb442..4fc6ea66b171a 100644 --- a/x-pack/docs/en/watcher/transform.asciidoc +++ b/x-pack/docs/en/watcher/transform.asciidoc @@ -56,11 +56,8 @@ part of the definition of the `my_webhook` action. <1> A watch level `transform` <2> An action level `transform` -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/transform/search.asciidoc include::transform/search.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/transform/script.asciidoc include::transform/script.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/transform/chain.asciidoc include::transform/chain.asciidoc[] \ No newline at end of file diff --git a/x-pack/docs/en/watcher/transform/search.asciidoc b/x-pack/docs/en/watcher/transform/search.asciidoc index d7f468f183182..439164429c038 100644 --- a/x-pack/docs/en/watcher/transform/search.asciidoc +++ b/x-pack/docs/en/watcher/transform/search.asciidoc @@ -52,7 +52,7 @@ The following table lists all available settings for the search transform: |====== | Name |Required | Default | Description -| `request.search_type` | no | query_then_fetch | The search {ref}/search-request-search-type.html[type]. +| `request.search_type` | no | query_then_fetch | The search {ref}/search-request-body.html#request-body-search-search-type[type]. | `request.indices` | no | all indices | One or more indices to search on. diff --git a/x-pack/docs/en/watcher/trigger.asciidoc b/x-pack/docs/en/watcher/trigger.asciidoc index af830e829a45e..ee52dbba3bd7a 100644 --- a/x-pack/docs/en/watcher/trigger.asciidoc +++ b/x-pack/docs/en/watcher/trigger.asciidoc @@ -9,5 +9,4 @@ the trigger and triggering the watch when needed. {watcher} is designed to support different types of triggers, but only time-based <> triggers are currently available.
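Because schedule triggers are the only available trigger type, every watch's `trigger` object wraps a `schedule`. As a small illustrative sketch (the cron expression is an assumption, not taken from the docs), a cron-based schedule trigger can be built like this:

```java
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

public class ScheduleTriggerSketch {
    public static void main(String[] args) throws Exception {
        // A time-based trigger: fire every five minutes, expressed as a cron schedule.
        XContentBuilder trigger = XContentFactory.jsonBuilder()
            .startObject()
                .startObject("trigger")
                    .startObject("schedule")
                        .field("cron", "0 0/5 * * * ?")
                    .endObject()
                .endObject()
            .endObject();
        System.out.println(Strings.toString(trigger));
    }
}
```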
-:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/trigger/schedule.asciidoc include::trigger/schedule.asciidoc[] diff --git a/x-pack/docs/en/watcher/trigger/schedule.asciidoc b/x-pack/docs/en/watcher/trigger/schedule.asciidoc index abbc3f5cfe8e5..7cd38c5fc9ba0 100644 --- a/x-pack/docs/en/watcher/trigger/schedule.asciidoc +++ b/x-pack/docs/en/watcher/trigger/schedule.asciidoc @@ -26,23 +26,16 @@ once per minute. For more information about throttling, see * <> * <> -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/schedule/hourly.asciidoc include::schedule/hourly.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/schedule/daily.asciidoc include::schedule/daily.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/schedule/weekly.asciidoc include::schedule/weekly.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/schedule/monthly.asciidoc include::schedule/monthly.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/schedule/yearly.asciidoc include::schedule/yearly.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/schedule/cron.asciidoc include::schedule/cron.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/schedule/interval.asciidoc include::schedule/interval.asciidoc[] diff --git a/x-pack/docs/en/watcher/trigger/schedule/cron.asciidoc b/x-pack/docs/en/watcher/trigger/schedule/cron.asciidoc index 57a6ebdfd92ef..af0f7319398f4 100644 --- a/x-pack/docs/en/watcher/trigger/schedule/cron.asciidoc +++ b/x-pack/docs/en/watcher/trigger/schedule/cron.asciidoc @@ -1,5 +1,5 @@ [[schedule-cron]] -==== `cron` Schedule +==== `cron` schedule A <> trigger that enables you to use a https://en.wikipedia.org/wiki/Cron[cron] style expression to specify when you @@ -213,7 +213,8 @@ minute during the weekend: [[croneval]] ===== Verifying Cron Expressions -{xpack} ships with a `elasticsearch-croneval` command line tool that you can use to verify that +The {es} {alert-features} provide an `elasticsearch-croneval` command line tool +that you can use to verify that your cron expressions are valid and produce the expected results. This tool is provided in the `$ES_HOME/bin` directory. diff --git a/x-pack/docs/en/watcher/troubleshooting.asciidoc b/x-pack/docs/en/watcher/troubleshooting.asciidoc new file mode 100644 index 0000000000000..0e6e463e35f1c --- /dev/null +++ b/x-pack/docs/en/watcher/troubleshooting.asciidoc @@ -0,0 +1,67 @@ +[role="xpack"] +[testenv="gold"] +[[watcher-troubleshooting]] +== Troubleshooting {watcher} +[subs="attributes"] +++++ +Troubleshooting +++++ + +[discrete] +=== Dynamic mapping error when trying to add a watch + +If you get the _Dynamic Mapping is Disabled_ error when you try to add a watch, +verify that the index mappings for the `.watches` index are available. You can +do that by submitting the following request: + +[source,js] +-------------------------------------------------- +GET .watches/_mapping +-------------------------------------------------- +// CONSOLE +// TEST[setup:my_active_watch] + +If the index mappings are missing, follow these steps to restore the correct +mappings: + +. Stop the Elasticsearch node. +. Add `xpack.watcher.index.rest.direct_access : true` to `elasticsearch.yml`. +.
Restart the Elasticsearch node. +. Delete the `.watches` index: ++ +-- +[source,js] +-------------------------------------------------- +DELETE .watches +-------------------------------------------------- +// CONSOLE +// TEST[skip:index deletion] +-- +. Disable direct access to the `.watches` index: +.. Stop the Elasticsearch node. +.. Remove `xpack.watcher.index.rest.direct_access : true` from `elasticsearch.yml`. +.. Restart the Elasticsearch node. + +[discrete] +=== Unable to send email + +If you get an authentication error indicating that you need to continue the +sign-in process from a web browser when Watcher attempts to send email, you need +to configure Gmail to +https://support.google.com/accounts/answer/6010255?hl=en[Allow Less Secure Apps to access your account]. + +If you have two-step verification enabled for your email account, you must +generate and use an App Specific password to send email from {watcher}. For more +information, see: + +- Gmail: https://support.google.com/accounts/answer/185833?hl=en[Sign in using App Passwords] +- Outlook.com: http://windows.microsoft.com/en-us/windows/app-passwords-two-step-verification[App passwords and two-step verification] + +[discrete] +=== {watcher} not responsive + +Keep in mind that there's no built-in validation of scripts that you add to a +watch. Buggy or deliberately malicious scripts can negatively impact {watcher} +performance. For example, if you add multiple watches with buggy script +conditions in a short period of time, {watcher} might be temporarily unable to +process watches until the bad watches time out. \ No newline at end of file diff --git a/x-pack/plugin/build.gradle b/x-pack/plugin/build.gradle index 630b43ed71e58..7473977f08da5 100644 --- a/x-pack/plugin/build.gradle +++ b/x-pack/plugin/build.gradle @@ -127,7 +127,7 @@ integTest.runner { } testClusters.integTest { - distribution = 'DEFAULT' // this is important since we use the reindex module in ML + testDistribution = 'DEFAULT' // this is important since we use the reindex module in ML setting 'xpack.ml.enabled', 'true' setting 'xpack.security.enabled', 'true' // Integration tests are supposed to enable/disable exporters before/after each test diff --git a/x-pack/plugin/ccr/build.gradle b/x-pack/plugin/ccr/build.gradle index dfc3b85dfe111..4b6c33f56ba09 100644 --- a/x-pack/plugin/ccr/build.gradle +++ b/x-pack/plugin/ccr/build.gradle @@ -1,3 +1,5 @@ +import org.elasticsearch.gradle.OS + evaluationDependsOn(xpackModule('core')) apply plugin: 'elasticsearch.esplugin' @@ -22,6 +24,8 @@ task internalClusterTestNoSecurityManager(type: Test) { include noSecurityManagerITClasses systemProperty 'es.set.netty.runtime.available.processors', 'false' systemProperty 'tests.security.manager', 'false' + // Disable tests on windows https://github.com/elastic/elasticsearch/issues/44610 + onlyIf { OS.WINDOWS.equals(OS.current()) == false } } // Instead we create a separate task to run the @@ -34,6 +38,8 @@ task internalClusterTest(type: Test) { include '**/*IT.class' exclude noSecurityManagerITClasses systemProperty 'es.set.netty.runtime.available.processors', 'false' + // Disable tests on windows https://github.com/elastic/elasticsearch/issues/44610 + onlyIf { OS.WINDOWS.equals(OS.current()) == false } } check.dependsOn internalClusterTest diff --git a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle index a45d3e1b32607..b2ad0c5054dac 100644 --- 
a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle +++ b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle @@ -19,7 +19,7 @@ task "leader-cluster"(type: RestIntegTestTask) { } } testClusters."leader-cluster" { - distribution = "DEFAULT" + testDistribution = 'DEFAULT' setting 'xpack.license.self_generated.type', 'trial' } @@ -51,7 +51,7 @@ task "follow-cluster"(type: RestIntegTestTask) { } testClusters."follow-cluster" { - distribution = "DEFAULT" + testDistribution = 'DEFAULT' setting 'xpack.monitoring.collection.enabled', 'true' setting 'xpack.license.self_generated.type', 'trial' setting 'cluster.remote.leader_cluster.seeds', { "\"${testClusters."leader-cluster".getAllTransportPortURI().join(",")}\"" } diff --git a/x-pack/plugin/ccr/qa/multi-cluster/build.gradle b/x-pack/plugin/ccr/qa/multi-cluster/build.gradle index ae0d4247d7038..0524dac9354d9 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster/build.gradle +++ b/x-pack/plugin/ccr/qa/multi-cluster/build.gradle @@ -20,7 +20,7 @@ task "leader-cluster"(type: RestIntegTestTask) { } testClusters."leader-cluster" { - distribution = "DEFAULT" + testDistribution = 'DEFAULT' setting 'xpack.license.self_generated.type', 'trial' } @@ -35,7 +35,7 @@ task "middle-cluster"(type: RestIntegTestTask) { } } testClusters."middle-cluster" { - distribution = "DEFAULT" + testDistribution = 'DEFAULT' setting 'xpack.license.self_generated.type', 'trial' setting 'cluster.remote.leader_cluster.seeds', { "\"${testClusters."leader-cluster".getAllTransportPortURI().join(",")}\"" } @@ -55,7 +55,7 @@ task 'follow-cluster'(type: RestIntegTestTask) { } testClusters."follow-cluster" { - distribution = "DEFAULT" + testDistribution = 'DEFAULT' setting 'xpack.monitoring.collection.enabled', 'true' setting 'xpack.license.self_generated.type', 'trial' setting 'cluster.remote.leader_cluster.seeds', diff --git a/x-pack/plugin/ccr/qa/non-compliant-license/build.gradle b/x-pack/plugin/ccr/qa/non-compliant-license/build.gradle index fa0c02eee3206..1ec89d1c4c5e1 100644 --- a/x-pack/plugin/ccr/qa/non-compliant-license/build.gradle +++ b/x-pack/plugin/ccr/qa/non-compliant-license/build.gradle @@ -19,7 +19,7 @@ task 'leader-cluster'(type: RestIntegTestTask) { } } testClusters.'leader-cluster' { - distribution = "DEFAULT" + testDistribution = 'DEFAULT' } task 'follow-cluster'(type: RestIntegTestTask) { @@ -32,7 +32,7 @@ task 'follow-cluster'(type: RestIntegTestTask) { } } testClusters.'follow-cluster' { - distribution = "DEFAULT" + testDistribution = 'DEFAULT' setting 'xpack.license.self_generated.type', 'trial' setting 'cluster.remote.leader_cluster.seeds', { "\"${testClusters.'leader-cluster'.getAllTransportPortURI().join(",")}\"" } diff --git a/x-pack/plugin/ccr/qa/rest/build.gradle b/x-pack/plugin/ccr/qa/rest/build.gradle index 1e4ca1abcbce0..d597aa38f372f 100644 --- a/x-pack/plugin/ccr/qa/rest/build.gradle +++ b/x-pack/plugin/ccr/qa/rest/build.gradle @@ -16,7 +16,7 @@ task restTest(type: RestIntegTestTask) { } testClusters.restTest { - distribution = 'default' + testDistribution = 'DEFAULT' // Disable assertions in FollowingEngineAssertions, otherwise an AssertionError is thrown before // indexing a document directly in a follower index. In a rest test we like to test the exception // that is thrown in production when indexing a document directly in a follower index. 
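The comment in `x-pack/plugin/ccr/qa/rest/build.gradle` above is about observing the production-time failure rather than a test-time `AssertionError` when a document is indexed directly into a follower index. Outside that REST test setup, the same expectation could be exercised against a live cluster roughly as in this sketch; the client setup and index name are assumptions, not part of this change:

```java
import org.apache.http.HttpHost;

import org.elasticsearch.client.Request;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;

public class FollowerWriteRejectionSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request index = new Request("PUT", "/follower-index/_doc/1");
            index.setJsonEntity("{\"field\": \"value\"}");
            try {
                client.performRequest(index);
                System.out.println("unexpected: direct write to the follower index was accepted");
            } catch (ResponseException e) {
                // The production behaviour under test: direct writes to a follower index are rejected.
                System.out.println("rejected as expected: " + e.getResponse().getStatusLine());
            }
        }
    }
}
```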
diff --git a/x-pack/plugin/ccr/qa/restart/build.gradle b/x-pack/plugin/ccr/qa/restart/build.gradle index d29cb136e57b2..564b5f87e0b4b 100644 --- a/x-pack/plugin/ccr/qa/restart/build.gradle +++ b/x-pack/plugin/ccr/qa/restart/build.gradle @@ -14,7 +14,7 @@ task 'leader-cluster'(type: RestIntegTestTask) { } } testClusters.'leader-cluster' { - distribution = "DEFAULT" + testDistribution = 'DEFAULT' setting 'xpack.license.self_generated.type', 'trial' } @@ -28,7 +28,7 @@ task 'follow-cluster'(type: RestIntegTestTask) { } } testClusters.'follow-cluster' { - distribution = "DEFAULT" + testDistribution = 'DEFAULT' setting 'xpack.monitoring.collection.enabled', 'true' setting 'xpack.license.self_generated.type', 'trial' setting 'cluster.remote.leader_cluster.seeds', diff --git a/x-pack/plugin/ccr/qa/security/build.gradle b/x-pack/plugin/ccr/qa/security/build.gradle index a24dc8cd99fdc..2ac780caf2220 100644 --- a/x-pack/plugin/ccr/qa/security/build.gradle +++ b/x-pack/plugin/ccr/qa/security/build.gradle @@ -20,7 +20,7 @@ task 'leader-cluster'(type: RestIntegTestTask) { } testClusters.'leader-cluster' { - distribution = 'Default' + testDistribution = 'DEFAULT' setting 'xpack.license.self_generated.type', 'trial' setting 'xpack.security.enabled', 'true' setting 'xpack.monitoring.enabled', 'false' @@ -39,7 +39,7 @@ task 'follow-cluster'(type: RestIntegTestTask) { } testClusters.'follow-cluster' { - distribution = 'Default' + testDistribution = 'DEFAULT' setting 'cluster.remote.leader_cluster.seeds', { "\"${testClusters.'leader-cluster'.getAllTransportPortURI().join(",")}\"" } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java index b926bc986510b..13749c2abb6d4 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java @@ -179,16 +179,17 @@ public Collection createComponents( CcrRestoreSourceService restoreSourceService = new CcrRestoreSourceService(threadPool, ccrSettings); this.restoreSourceService.set(restoreSourceService); return Arrays.asList( + ccrLicenseChecker, + restoreSourceService, + new CcrRepositoryManager(settings, clusterService, client), + new AutoFollowCoordinator( + settings, + client, + clusterService, ccrLicenseChecker, - restoreSourceService, - new CcrRepositoryManager(settings, clusterService, client), - new AutoFollowCoordinator( - settings, - client, - clusterService, - ccrLicenseChecker, - threadPool::relativeTimeInMillis, - threadPool::absoluteTimeInMillis)); + threadPool::relativeTimeInMillis, + threadPool::absoluteTimeInMillis, + threadPool.executor(Ccr.CCR_THREAD_POOL_NAME))); } @Override diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java index 286d524f60b4a..8adb6140be099 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java @@ -7,10 +7,10 @@ package org.elasticsearch.xpack.ccr; import org.elasticsearch.ElasticsearchStatusException; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; import 
org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.stats.IndexShardStats; @@ -45,6 +45,7 @@ import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.permission.ResourcePrivileges; import org.elasticsearch.xpack.core.security.support.Exceptions; +import org.elasticsearch.xpack.core.security.user.User; import java.util.Arrays; import java.util.Collections; @@ -61,7 +62,7 @@ /** * Encapsulates licensing checking for CCR. */ -public final class CcrLicenseChecker { +public class CcrLicenseChecker { private final BooleanSupplier isCcrAllowed; private final BooleanSupplier isAuthAllowed; @@ -307,9 +308,12 @@ public void hasPrivilegesToFollowIndices(final Client remoteClient, final String return; } - ThreadContext threadContext = remoteClient.threadPool().getThreadContext(); - SecurityContext securityContext = new SecurityContext(Settings.EMPTY, threadContext); - String username = securityContext.getUser().principal(); + final User user = getUser(remoteClient); + if (user == null) { + handler.accept(new IllegalStateException("missing or unable to read authentication info on request")); + return; + } + String username = user.principal(); RoleDescriptor.IndicesPrivileges privileges = RoleDescriptor.IndicesPrivileges.builder() .indices(indices) @@ -344,6 +348,12 @@ public void hasPrivilegesToFollowIndices(final Client remoteClient, final String remoteClient.execute(HasPrivilegesAction.INSTANCE, request, ActionListener.wrap(responseHandler, handler)); } + User getUser(final Client remoteClient) { + final ThreadContext threadContext = remoteClient.threadPool().getThreadContext(); + final SecurityContext securityContext = new SecurityContext(Settings.EMPTY, threadContext); + return securityContext.getUser(); + } + public static Client wrapClient(Client client, Map headers) { if (headers.isEmpty()) { return client; diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java index eb0a0bceb89a7..13dc84b858243 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java @@ -55,6 +55,7 @@ import java.util.Objects; import java.util.Set; import java.util.TreeMap; +import java.util.concurrent.Executor; import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.Function; @@ -78,6 +79,7 @@ public class AutoFollowCoordinator extends AbstractLifecycleComponent implements private final CcrLicenseChecker ccrLicenseChecker; private final LongSupplier relativeMillisTimeProvider; private final LongSupplier absoluteMillisTimeProvider; + private final Executor executor; private volatile TimeValue waitForMetadataTimeOut; private volatile Map autoFollowers = Collections.emptyMap(); @@ -89,18 +91,20 @@ public class AutoFollowCoordinator extends AbstractLifecycleComponent implements private final LinkedHashMap> recentAutoFollowErrors; public AutoFollowCoordinator( - Settings settings, - Client client, - ClusterService clusterService, - CcrLicenseChecker ccrLicenseChecker, - LongSupplier relativeMillisTimeProvider, - LongSupplier absoluteMillisTimeProvider) { + final Settings settings, + final Client client, + 
final ClusterService clusterService, + final CcrLicenseChecker ccrLicenseChecker, + final LongSupplier relativeMillisTimeProvider, + final LongSupplier absoluteMillisTimeProvider, + final Executor executor) { this.client = client; this.clusterService = clusterService; this.ccrLicenseChecker = Objects.requireNonNull(ccrLicenseChecker, "ccrLicenseChecker"); this.relativeMillisTimeProvider = relativeMillisTimeProvider; this.absoluteMillisTimeProvider = absoluteMillisTimeProvider; + this.executor = Objects.requireNonNull(executor); this.recentAutoFollowErrors = new LinkedHashMap>() { @Override protected boolean removeEldestEntry(final Map.Entry> eldest) { @@ -210,7 +214,7 @@ void updateAutoFollowers(ClusterState followerClusterState) { Map newAutoFollowers = new HashMap<>(newRemoteClusters.size()); for (String remoteCluster : newRemoteClusters) { AutoFollower autoFollower = - new AutoFollower(remoteCluster, this::updateStats, clusterService::state, relativeMillisTimeProvider) { + new AutoFollower(remoteCluster, this::updateStats, clusterService::state, relativeMillisTimeProvider, executor) { @Override void getRemoteClusterState(final String remoteCluster, @@ -332,6 +336,7 @@ abstract static class AutoFollower { private final Consumer> statsUpdater; private final Supplier followerClusterStateSupplier; private final LongSupplier relativeTimeProvider; + private final Executor executor; private volatile long lastAutoFollowTimeInMillis = -1; private volatile long metadataVersion = 0; @@ -344,11 +349,13 @@ abstract static class AutoFollower { AutoFollower(final String remoteCluster, final Consumer> statsUpdater, final Supplier followerClusterStateSupplier, - LongSupplier relativeTimeProvider) { + final LongSupplier relativeTimeProvider, + final Executor executor) { this.remoteCluster = remoteCluster; this.statsUpdater = statsUpdater; this.followerClusterStateSupplier = followerClusterStateSupplier; this.relativeTimeProvider = relativeTimeProvider; + this.executor = Objects.requireNonNull(executor); } void start() { @@ -387,6 +394,7 @@ void start() { this.autoFollowPatternsCountDown = new CountDown(patterns.size()); this.autoFollowResults = new AtomicArray<>(patterns.size()); + final Thread thread = Thread.currentThread(); getRemoteClusterState(remoteCluster, metadataVersion + 1, (remoteClusterStateResponse, remoteError) -> { // Also check removed flag here, as it may take a while for this remote cluster state api call to return: if (removed) { @@ -403,7 +411,7 @@ void start() { } ClusterState remoteClusterState = remoteClusterStateResponse.getState(); metadataVersion = remoteClusterState.metaData().version(); - autoFollowIndices(autoFollowMetadata, clusterState, remoteClusterState, patterns); + autoFollowIndices(autoFollowMetadata, clusterState, remoteClusterState, patterns, thread); } else { assert remoteError != null; if (remoteError instanceof NoSuchRemoteClusterException) { @@ -414,7 +422,7 @@ void start() { for (int i = 0; i < patterns.size(); i++) { String autoFollowPatternName = patterns.get(i); - finalise(i, new AutoFollowResult(autoFollowPatternName, remoteError)); + finalise(i, new AutoFollowResult(autoFollowPatternName, remoteError), thread); } } }); @@ -428,7 +436,8 @@ void stop() { private void autoFollowIndices(final AutoFollowMetadata autoFollowMetadata, final ClusterState clusterState, final ClusterState remoteClusterState, - final List patterns) { + final List patterns, + final Thread thread) { int i = 0; for (String autoFollowPatternName : patterns) { final int slot = i; @@ -439,7 
+448,7 @@ private void autoFollowIndices(final AutoFollowMetadata autoFollowMetadata, final List leaderIndicesToFollow = getLeaderIndicesToFollow(autoFollowPattern, remoteClusterState, followedIndices); if (leaderIndicesToFollow.isEmpty()) { - finalise(slot, new AutoFollowResult(autoFollowPatternName)); + finalise(slot, new AutoFollowResult(autoFollowPatternName), thread); } else { List> patternsForTheSameRemoteCluster = autoFollowMetadata.getPatterns() .entrySet().stream() @@ -448,7 +457,7 @@ private void autoFollowIndices(final AutoFollowMetadata autoFollowMetadata, .map(item -> new Tuple<>(item.getKey(), item.getValue())) .collect(Collectors.toList()); - Consumer resultHandler = result -> finalise(slot, result); + Consumer resultHandler = result -> finalise(slot, result, thread); checkAutoFollowPattern(autoFollowPatternName, remoteCluster, autoFollowPattern, leaderIndicesToFollow, headers, patternsForTheSameRemoteCluster, remoteClusterState.metaData(), clusterState.metaData(), resultHandler); } @@ -561,11 +570,23 @@ private void followLeaderIndex(String autoFollowPattenName, createAndFollow(headers, request, successHandler, onResult); } - private void finalise(int slot, AutoFollowResult result) { + private void finalise(int slot, AutoFollowResult result, final Thread thread) { assert autoFollowResults.get(slot) == null; autoFollowResults.set(slot, result); if (autoFollowPatternsCountDown.countDown()) { statsUpdater.accept(autoFollowResults.asList()); + /* + * In the face of a failure, we could be called back on the same thread. That is, it could be that we + * never fired off the asynchronous remote cluster state call, instead failing beforehand. In this case, + * we will recurse on the same thread. If there are repeated failures, we could blow the stack and + * overflow. A real-world scenario in which this can occur is if the local connect queue is full. To + * avoid this, if we are called back on the same thread, then we truncate the stack by forking to + * another thread. 
+ */ + if (thread == Thread.currentThread()) { + executor.execute(this::start); + return; + } start(); } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java index e10ccb0f143eb..39b11f08ec69b 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java @@ -58,12 +58,7 @@ public class ShardChangesAction extends ActionType public static final String NAME = "indices:data/read/xpack/ccr/shard_changes"; private ShardChangesAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return Response::new; + super(NAME, ShardChangesAction.Response::new); } public static class Request extends SingleShardRequest { @@ -285,11 +280,6 @@ public long getTookInMillis() { this.tookInMillis = tookInMillis; } - @Override - public void readFrom(final StreamInput in) { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(final StreamOutput out) throws IOException { out.writeVLong(mappingVersion); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java index 3a699dda7c338..ced081ec0edf6 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java @@ -185,6 +185,10 @@ protected void innerUpdateSettings(final LongConsumer finalHandler, final Consum } return existingSettings.get(s) == null || existingSettings.get(s).equals(settings.get(s)) == false; }); + if (updatedSettings.isEmpty()) { + finalHandler.accept(leaderIMD.getSettingsVersion()); + return; + } // Figure out whether the updated settings are all dynamic settings and // if so just update the follower index's settings: if (updatedSettings.keySet().stream().allMatch(indexScopedSettings::isDynamicSetting)) { diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportCcrStatsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportCcrStatsAction.java index 8f14f367bc608..3ca2974a9918e 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportCcrStatsAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportCcrStatsAction.java @@ -67,11 +67,6 @@ protected String executor() { return Ccr.CCR_THREAD_POOL_NAME; } - @Override - protected CcrStatsAction.Response newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected CcrStatsAction.Response read(StreamInput in) throws IOException { return new CcrStatsAction.Response(in); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternAction.java index 3623017ac5d35..34d8400e1d68f 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternAction.java +++ 
b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternAction.java @@ -52,11 +52,6 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { return new AcknowledgedResponse(in); } - @Override - protected AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected void masterOperation(Task task, DeleteAutoFollowPatternAction.Request request, ClusterState state, diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowInfoAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowInfoAction.java index 64782cda63615..6dac7ba24d54e 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowInfoAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowInfoAction.java @@ -23,9 +23,9 @@ import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.ccr.Ccr; import org.elasticsearch.xpack.core.ccr.action.FollowInfoAction; -import org.elasticsearch.xpack.core.ccr.action.FollowParameters; import org.elasticsearch.xpack.core.ccr.action.FollowInfoAction.Response.FollowerInfo; import org.elasticsearch.xpack.core.ccr.action.FollowInfoAction.Response.Status; +import org.elasticsearch.xpack.core.ccr.action.FollowParameters; import java.io.IOException; import java.util.ArrayList; @@ -48,11 +48,6 @@ protected String executor() { return ThreadPool.Names.SAME; } - @Override - protected FollowInfoAction.Response newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected FollowInfoAction.Response read(StreamInput in) throws IOException { return new FollowInfoAction.Response(in); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportGetAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportGetAutoFollowPatternAction.java index 6359815ced443..12339bd0a530e 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportGetAutoFollowPatternAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportGetAutoFollowPatternAction.java @@ -47,11 +47,6 @@ protected String executor() { return ThreadPool.Names.SAME; } - @Override - protected GetAutoFollowPatternAction.Response newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected GetAutoFollowPatternAction.Response read(StreamInput in) throws IOException { return new GetAutoFollowPatternAction.Response(in); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPauseFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPauseFollowAction.java index 12c0aec2bf3e7..52b2e8bbdae68 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPauseFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPauseFollowAction.java @@ -55,11 +55,6 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { return new AcknowledgedResponse(in); } - @Override - protected AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by 
Writeable"); - } - @Override protected void masterOperation(Task task, PauseFollowAction.Request request, ClusterState state, diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java index ee616b423f23a..c453d45e37349 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java @@ -71,11 +71,6 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { return new AcknowledgedResponse(in); } - @Override - protected AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected void masterOperation(Task task, PutAutoFollowPatternAction.Request request, ClusterState state, diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java index 37d33b5187c43..62736405b39b8 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java @@ -83,11 +83,6 @@ protected String executor() { return ThreadPool.Names.SAME; } - @Override - protected PutFollowAction.Response newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected PutFollowAction.Response read(StreamInput in) throws IOException { return new PutFollowAction.Response(in); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java index 5d9aa0cc8076e..28aedc91ff9f9 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java @@ -107,11 +107,6 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { return new AcknowledgedResponse(in); } - @Override - protected AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected ClusterBlockException checkBlock(ResumeFollowAction.Request request, ClusterState state) { return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowAction.java index 4e1337d884319..b2d85f7dd7511 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowAction.java @@ -80,11 +80,6 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { return new AcknowledgedResponse(in); } - @Override - protected AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced 
by Writeable"); - } - @Override protected void masterOperation( Task task, final UnfollowAction.Request request, diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsAction.java index 02af69d9ac3ae..4da457bb8668a 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsAction.java @@ -5,20 +5,14 @@ */ package org.elasticsearch.xpack.ccr.action.bulk; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class BulkShardOperationsAction extends StreamableResponseActionType { +public class BulkShardOperationsAction extends ActionType { public static final BulkShardOperationsAction INSTANCE = new BulkShardOperationsAction(); public static final String NAME = "indices:data/write/bulk_shard_operations[s]"; private BulkShardOperationsAction() { - super(NAME); + super(NAME, BulkShardOperationsResponse::new); } - - @Override - public BulkShardOperationsResponse newResponse() { - return new BulkShardOperationsResponse(); - } - } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsRequest.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsRequest.java index f05a616c956b8..844769e49cd2b 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsRequest.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsRequest.java @@ -50,11 +50,6 @@ public long getMaxSeqNoOfUpdatesOrDeletes() { return maxSeqNoOfUpdatesOrDeletes; } - @Override - public void readFrom(final StreamInput in) { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(final StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsResponse.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsResponse.java index 0c72f02fde169..f244369caebf4 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsResponse.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsResponse.java @@ -37,15 +37,14 @@ public void setMaxSeqNo(final long maxSeqNo) { public BulkShardOperationsResponse() { } - @Override - public void setForcedRefresh(final boolean forcedRefresh) { + public BulkShardOperationsResponse(StreamInput in) throws IOException { + super(in); + globalCheckpoint = in.readZLong(); + maxSeqNo = in.readZLong(); } @Override - public void readFrom(final StreamInput in) throws IOException { - super.readFrom(in); - globalCheckpoint = in.readZLong(); - maxSeqNo = in.readZLong(); + public void setForcedRefresh(final boolean forcedRefresh) { } @Override diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/TransportBulkShardOperationsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/TransportBulkShardOperationsAction.java index 5f8f1d5368a62..2a8123c17e9f1 100644 --- 
a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/TransportBulkShardOperationsAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/TransportBulkShardOperationsAction.java @@ -16,6 +16,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.seqno.SeqNoStats; @@ -190,8 +191,8 @@ public static WriteReplicaResult shardOperationOnRep } @Override - protected BulkShardOperationsResponse newResponseInstance() { - return new BulkShardOperationsResponse(); + protected BulkShardOperationsResponse newResponseInstance(StreamInput in) throws IOException { + return new BulkShardOperationsResponse(in); } /** diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/ClearCcrRestoreSessionAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/ClearCcrRestoreSessionAction.java index c5c2237edfc65..ec8bb500fc34e 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/ClearCcrRestoreSessionAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/ClearCcrRestoreSessionAction.java @@ -14,7 +14,6 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportActionProxy; @@ -29,12 +28,7 @@ public class ClearCcrRestoreSessionAction extends ActionType getResponseReader() { - return ClearCcrRestoreSessionResponse::new; + super(NAME, ClearCcrRestoreSessionAction.ClearCcrRestoreSessionResponse::new); } public static class TransportDeleteCcrRestoreSessionAction diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/ClearCcrRestoreSessionRequest.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/ClearCcrRestoreSessionRequest.java index b9d277ca1b49a..9cc9ca05b0959 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/ClearCcrRestoreSessionRequest.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/ClearCcrRestoreSessionRequest.java @@ -21,7 +21,7 @@ public class ClearCcrRestoreSessionRequest extends ActionRequest implements Remo private String sessionUUID; ClearCcrRestoreSessionRequest(StreamInput in) throws IOException { - super.readFrom(in); + super(in); sessionUUID = in.readString(); } @@ -35,11 +35,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/DeleteInternalCcrRepositoryAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/DeleteInternalCcrRepositoryAction.java index ee26388d5ddb4..aa0b384acc6b3 100644 --- 
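ClearCcrRestoreSessionRequest above shows the request-side half of the migration: the deserializing constructor chains to the parent's StreamInput constructor instead of calling super.readFrom(in), and the throwing readFrom override goes away. A sketch with an invented field:

import java.io.IOException;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

class ExampleRequest extends ActionRequest {

    private final String sessionUUID;

    ExampleRequest(StreamInput in) throws IOException {
        super(in);                    // parent fields come from the stream, not from readFrom
        sessionUUID = in.readString();
    }

    ExampleRequest(String sessionUUID) {
        this.sessionUUID = sessionUUID;
    }

    @Override
    public ActionRequestValidationException validate() {
        return null;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);           // write parent fields first, mirroring the constructor
        out.writeString(sessionUUID);
    }
}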
a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/DeleteInternalCcrRepositoryAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/DeleteInternalCcrRepositoryAction.java @@ -14,7 +14,6 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; @@ -27,12 +26,7 @@ public class DeleteInternalCcrRepositoryAction extends ActionType getResponseReader() { - return DeleteInternalCcrRepositoryResponse::new; + super(NAME, DeleteInternalCcrRepositoryAction.DeleteInternalCcrRepositoryResponse::new); } public static class TransportDeleteInternalRepositoryAction diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/DeleteInternalCcrRepositoryRequest.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/DeleteInternalCcrRepositoryRequest.java index 12264c1d57c85..eefecbcc549b5 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/DeleteInternalCcrRepositoryRequest.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/DeleteInternalCcrRepositoryRequest.java @@ -8,7 +8,6 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; @@ -27,11 +26,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("DeleteInternalRepositoryRequest cannot be serialized for sending across the wire."); - } - @Override public void writeTo(StreamOutput out) throws IOException { throw new UnsupportedOperationException("DeleteInternalRepositoryRequest cannot be serialized for sending across the wire."); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java index 5fb169dd3334c..9f348c5a470b4 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java @@ -6,9 +6,9 @@ package org.elasticsearch.xpack.ccr.action.repositories; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.common.bytes.BytesReference; @@ -16,7 +16,6 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.ByteArray; import 
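DeleteInternalCcrRepositoryRequest (like its Put counterpart later in this diff) is deliberately not wire-serializable: it is only ever handed to a transport action on the local node, so writeTo throws and there is no StreamInput constructor at all. The minimal shape of such a node-local request, sketched:

import java.io.IOException;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.common.io.stream.StreamOutput;

// Sketch: a node-local request. No StreamInput constructor exists, and writeTo
// fails loudly if anything ever tries to send it to another node.
class LocalOnlyRequest extends ActionRequest {

    @Override
    public ActionRequestValidationException validate() {
        return null;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        throw new UnsupportedOperationException("this request never crosses the wire");
    }
}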
org.elasticsearch.tasks.Task; @@ -33,15 +32,9 @@ public class GetCcrRestoreFileChunkAction extends ActionType getResponseReader() { - return GetCcrRestoreFileChunkResponse::new; - } - - public static class TransportGetCcrRestoreFileChunkAction extends HandledTransportAction { diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkRequest.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkRequest.java index 5da0efcb372f5..39baeeb73f4ad 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkRequest.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkRequest.java @@ -51,11 +51,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(size); } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException(); - } - String getSessionUUID() { return sessionUUID; } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutCcrRestoreSessionAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutCcrRestoreSessionAction.java index 16dd856538c52..f98254f8ac5e8 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutCcrRestoreSessionAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutCcrRestoreSessionAction.java @@ -36,12 +36,7 @@ public class PutCcrRestoreSessionAction extends ActionType getResponseReader() { - return PutCcrRestoreSessionAction.PutCcrRestoreSessionResponse::new; + super(NAME, PutCcrRestoreSessionResponse::new); } public static class TransportPutCcrRestoreSessionAction @@ -95,9 +90,6 @@ public static class PutCcrRestoreSessionResponse extends ActionResponse { private Store.MetadataSnapshot storeFileMetaData; private long mappingVersion; - PutCcrRestoreSessionResponse() { - } - PutCcrRestoreSessionResponse(DiscoveryNode node, Store.MetadataSnapshot storeFileMetaData, long mappingVersion) { this.node = node; this.storeFileMetaData = storeFileMetaData; @@ -111,14 +103,6 @@ public static class PutCcrRestoreSessionResponse extends ActionResponse { mappingVersion = in.readVLong(); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - node = new DiscoveryNode(in); - storeFileMetaData = new Store.MetadataSnapshot(in); - mappingVersion = in.readVLong(); - } - @Override public void writeTo(StreamOutput out) throws IOException { node.writeTo(out); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutInternalCcrRepositoryAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutInternalCcrRepositoryAction.java index d5c32d8fcd4f7..264c5adcc10a4 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutInternalCcrRepositoryAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutInternalCcrRepositoryAction.java @@ -14,7 +14,6 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.repositories.RepositoriesService; import 
org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; @@ -27,12 +26,7 @@ public class PutInternalCcrRepositoryAction extends ActionType getResponseReader() { - return PutInternalCcrRepositoryResponse::new; + super(NAME, PutInternalCcrRepositoryAction.PutInternalCcrRepositoryResponse::new); } public static class TransportPutInternalRepositoryAction diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutInternalCcrRepositoryRequest.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutInternalCcrRepositoryRequest.java index 71efcdf319da9..e41728b5b8279 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutInternalCcrRepositoryRequest.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutInternalCcrRepositoryRequest.java @@ -8,7 +8,6 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; @@ -29,11 +28,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("PutInternalRepositoryRequest cannot be serialized for sending across the wire."); - } - @Override public void writeTo(StreamOutput out) throws IOException { throw new UnsupportedOperationException("PutInternalRepositoryRequest cannot be serialized for sending across the wire."); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrLicenseCheckerTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrLicenseCheckerTests.java new file mode 100644 index 0000000000000..bf49c6ab8372f --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrLicenseCheckerTests.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.client.Client; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.user.User; + +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasToString; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.mock; + +public class CcrLicenseCheckerTests extends ESTestCase { + + public void testNoAuthenticationInfo() { + final boolean isCcrAllowed = randomBoolean(); + final CcrLicenseChecker checker = new CcrLicenseChecker(() -> isCcrAllowed, () -> true) { + + @Override + User getUser(final Client remoteClient) { + return null; + } + + }; + final AtomicBoolean invoked = new AtomicBoolean(); + checker.hasPrivilegesToFollowIndices( + mock(Client.class), + new String[]{randomAlphaOfLength(8)}, + e -> { + invoked.set(true); + assertThat(e, instanceOf(IllegalStateException.class)); + assertThat(e, hasToString(containsString("missing or unable to read authentication info on request"))); + }); + assertTrue(invoked.get()); + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java index 665d45ad25950..a2d08c62d5bc0 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.ccr; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ExceptionsHelper; @@ -99,6 +100,7 @@ import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -1059,6 +1061,46 @@ public void onFailure(String source, Exception e) { }); } + public void testReplicatePrivateSettingsOnly() throws Exception { + assertAcked(leaderClient().admin().indices().prepareCreate("leader").setSource( + getIndexSettings(1, 0, singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")), XContentType.JSON)); + ensureLeaderGreen("leader"); + followerClient().execute(PutFollowAction.INSTANCE, putFollow("leader", "follower")).get(); + final ClusterService clusterService = getLeaderCluster().getInstance(ClusterService.class, getLeaderCluster().getMasterName()); + final SetOnce settingVersionOnLeader = new SetOnce<>(); + final CountDownLatch latch = new CountDownLatch(1); + clusterService.submitStateUpdateTask("test", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + final IndexMetaData indexMetaData = currentState.metaData().index("leader"); + Settings.Builder settings = Settings.builder().put(indexMetaData.getSettings()); + settings.put(PrivateSettingPlugin.INDEX_PRIVATE_SETTING.getKey(), "internal-value"); + settings.put(PrivateSettingPlugin.INDEX_INTERNAL_SETTING.getKey(), "internal-value"); + final MetaData.Builder metadata = MetaData.builder(currentState.metaData()) + .put(IndexMetaData.builder(indexMetaData) + .settingsVersion(indexMetaData.getSettingsVersion() + 1) + .settings(settings).build(), true); + return 
ClusterState.builder(currentState).metaData(metadata).build(); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + settingVersionOnLeader.set(newState.metaData().index("leader").getSettingsVersion()); + latch.countDown(); + } + + @Override + public void onFailure(String source, Exception e) { + throw new AssertionError(e); + } + }); + latch.await(); + assertBusy(() -> assertThat(getFollowTaskSettingsVersion("follower"), equalTo(settingVersionOnLeader.get()))); + GetSettingsResponse resp = followerClient().admin().indices().prepareGetSettings("follower").get(); + assertThat(resp.getSetting("follower", PrivateSettingPlugin.INDEX_INTERNAL_SETTING.getKey()), nullValue()); + assertThat(resp.getSetting("follower", PrivateSettingPlugin.INDEX_PRIVATE_SETTING.getKey()), nullValue()); + } + public void testMustCloseIndexAndPauseToRestartWithPutFollowing() throws Exception { final int numberOfPrimaryShards = randomIntBetween(1, 3); final String leaderIndexSettings = getIndexSettings(numberOfPrimaryShards, between(0, 1), diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java index 7648d10aa0915..83827c9e86d4f 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java @@ -21,6 +21,9 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.test.ESTestCase; @@ -41,6 +44,9 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiConsumer; import java.util.function.Consumer; @@ -92,7 +98,7 @@ public void testAutoFollower() { assertThat(entries.get(0).getKey().getName(), equalTo("logs-20190101")); assertThat(entries.get(0).getValue(), nullValue()); }; - AutoFollower autoFollower = new AutoFollower("remote", handler, localClusterStateSupplier(currentState), () -> 1L) { + AutoFollower autoFollower = new AutoFollower("remote", handler, localClusterStateSupplier(currentState), () -> 1L, Runnable::run) { @Override void getRemoteClusterState(String remoteCluster, long metadataVersion, @@ -157,7 +163,7 @@ public void testAutoFollowerClusterStateApiFailure() { assertThat(results.get(0).clusterStateFetchException, sameInstance(failure)); assertThat(results.get(0).autoFollowExecutionResults.entrySet().size(), equalTo(0)); }; - AutoFollower autoFollower = new AutoFollower("remote", handler, localClusterStateSupplier(clusterState), () -> 1L) { + AutoFollower autoFollower = new AutoFollower("remote", handler, localClusterStateSupplier(clusterState), () -> 1L, Runnable::run) { @Override void getRemoteClusterState(String remoteCluster, long metadataVersion, @@ -212,7 +218,7 @@ public void testAutoFollowerUpdateClusterStateFailure() { 
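testReplicatePrivateSettingsOnly above drives a private-settings change through a ClusterStateUpdateTask, blocks on a CountDownLatch until the state is applied, and then uses assertBusy to check that the follower converged on the leader's settings version without ever receiving the private settings. The state-mutation skeleton, stripped of the CCR specifics (mutate is a hypothetical placeholder):

import java.util.concurrent.CountDownLatch;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.service.ClusterService;

// Sketch: deterministic cluster-state mutation in a test.
class StateMutationHelper {

    void mutateAndWait(ClusterService clusterService) throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        clusterService.submitStateUpdateTask("test", new ClusterStateUpdateTask() {
            @Override
            public ClusterState execute(ClusterState currentState) {
                return mutate(currentState);
            }

            @Override
            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                latch.countDown(); // capture anything needed from newState before counting down
            }

            @Override
            public void onFailure(String source, Exception e) {
                throw new AssertionError(e); // fail the test loudly rather than hang
            }
        });
        latch.await(); // the update task runs on the master service thread, not here
    }

    ClusterState mutate(ClusterState currentState) {
        return currentState; // hypothetical: return an altered state, e.g. with a bumped settings version
    }
}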
assertThat(entries.get(0).getKey().getName(), equalTo("logs-20190101")); assertThat(entries.get(0).getValue(), sameInstance(failure)); }; - AutoFollower autoFollower = new AutoFollower("remote", handler, localClusterStateSupplier(clusterState), () -> 1L) { + AutoFollower autoFollower = new AutoFollower("remote", handler, localClusterStateSupplier(clusterState), () -> 1L, Runnable::run) { @Override void getRemoteClusterState(String remoteCluster, long metadataVersion, @@ -269,7 +275,7 @@ public void testAutoFollowerCreateAndFollowApiCallFailure() { assertThat(entries.get(0).getKey().getName(), equalTo("logs-20190101")); assertThat(entries.get(0).getValue(), sameInstance(failure)); }; - AutoFollower autoFollower = new AutoFollower("remote", handler, localClusterStateSupplier(clusterState), () -> 1L) { + AutoFollower autoFollower = new AutoFollower("remote", handler, localClusterStateSupplier(clusterState), () -> 1L, Runnable::run) { @Override void getRemoteClusterState(String remoteCluster, long metadataVersion, @@ -538,7 +544,9 @@ public void testStats() { null, mockClusterService(), new CcrLicenseChecker(() -> true, () -> false), - () -> 1L, () -> 1L); + () -> 1L, + () -> 1L, + Runnable::run); autoFollowCoordinator.updateStats(Collections.singletonList( new AutoFollowCoordinator.AutoFollowResult("_alias1")) @@ -603,7 +611,9 @@ public void testUpdateAutoFollowers() { null, clusterService, new CcrLicenseChecker(() -> true, () -> false), - () -> 1L, () -> 1L); + () -> 1L, + () -> 1L, + Runnable::run); // Add 3 patterns: Map patterns = new HashMap<>(); patterns.put("pattern1", new AutoFollowPattern("remote1", Collections.singletonList("logs-*"), null, null, null, @@ -671,7 +681,9 @@ public void testUpdateAutoFollowersNoPatterns() { null, mockClusterService(), new CcrLicenseChecker(() -> true, () -> false), - () -> 1L, () -> 1L); + () -> 1L, + () -> 1L, + Runnable::run); ClusterState clusterState = ClusterState.builder(new ClusterName("remote")) .metaData(MetaData.builder().putCustom(AutoFollowMetadata.TYPE, new AutoFollowMetadata(Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap()))) @@ -686,7 +698,9 @@ public void testUpdateAutoFollowersNoAutoFollowMetadata() { null, mockClusterService(), new CcrLicenseChecker(() -> true, () -> false), - () -> 1L, () -> 1L); + () -> 1L, + () -> 1L, + Runnable::run); ClusterState clusterState = ClusterState.builder(new ClusterName("remote")).build(); autoFollowCoordinator.updateAutoFollowers(clusterState); assertThat(autoFollowCoordinator.getStats().getAutoFollowedClusters().size(), equalTo(0)); @@ -719,7 +733,7 @@ public void testWaitForMetadataVersion() { List allResults = new ArrayList<>(); Consumer> handler = allResults::addAll; - AutoFollower autoFollower = new AutoFollower("remote", handler, localClusterStateSupplier(states), () -> 1L) { + AutoFollower autoFollower = new AutoFollower("remote", handler, localClusterStateSupplier(states), () -> 1L, Runnable::run) { long previousRequestedMetadataVersion = 0; @@ -777,7 +791,7 @@ public void testWaitForTimeOut() { fail("should not be invoked"); }; AtomicInteger counter = new AtomicInteger(); - AutoFollower autoFollower = new AutoFollower("remote", handler, localClusterStateSupplier(states), () -> 1L) { + AutoFollower autoFollower = new AutoFollower("remote", handler, localClusterStateSupplier(states), () -> 1L, Runnable::run) { long previousRequestedMetadataVersion = 0; @@ -831,7 +845,7 @@ public void testAutoFollowerSoftDeletesDisabled() { List results = new ArrayList<>(); Consumer> 
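These test hunks all satisfy the AutoFollower's new executor parameter with Runnable::run, a direct executor that runs every task inline on the calling thread, keeping the unit tests single-threaded and deterministic; production supplies a real executor so that retries fork instead of recursing. A standalone illustration:

import java.util.concurrent.Executor;

// Runnable::run satisfies Executor because its single method execute(Runnable)
// can be bound to Runnable::run: each task executes inline, on the caller's thread.
final class DirectExecution {

    static final Executor DIRECT = Runnable::run;

    public static void main(String[] args) {
        DIRECT.execute(() -> System.out.println(Thread.currentThread().getName())); // prints "main"
    }
}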
handler = results::addAll; - AutoFollower autoFollower = new AutoFollower("remote", handler, localClusterStateSupplier(currentState), () -> 1L) { + AutoFollower autoFollower = new AutoFollower("remote", handler, localClusterStateSupplier(currentState), () -> 1L, Runnable::run) { @Override void getRemoteClusterState(String remoteCluster, long metadataVersion, @@ -907,7 +921,7 @@ public void testAutoFollowerFollowerIndexAlreadyExists() { Consumer<List<AutoFollowCoordinator.AutoFollowResult>> handler = results -> { resultHolder[0] = results; }; - AutoFollower autoFollower = new AutoFollower("remote", handler, localClusterStateSupplier(currentState), () -> 1L) { + AutoFollower autoFollower = new AutoFollower("remote", handler, localClusterStateSupplier(currentState), () -> 1L, Runnable::run) { @Override void getRemoteClusterState(String remoteCluster, long metadataVersion, @@ -952,6 +966,85 @@ void cleanFollowedRemoteIndices(ClusterState remoteClusterState, List pa assertThat(entries.get(0).getValue(), nullValue()); } + /* + * This tests a situation where, in the face of repeated failures, we would be called back on the same thread, and + * then recurse through the start method again, and eventually stack overflow. Now when we are called back on the + * same thread, we fork a new thread to avoid this. This test simulates a repeated failure to exercise this logic + * and ensures that we do not stack overflow. If we did stack overflow, it would go as an uncaught exception and + * fail the test. We have sufficiently high iterations here to ensure that we would indeed stack overflow were it + * not for this logic. + */ + public void testRepeatedFailures() throws InterruptedException { + final ClusterState clusterState = mock(ClusterState.class); + final MetaData metaData = mock(MetaData.class); + when(clusterState.metaData()).thenReturn(metaData); + final AutoFollowPattern pattern = new AutoFollowPattern( + "remote", + List.of("*"), + "{}", + 0, + 0, + 0, + 0, + ByteSizeValue.ZERO, + ByteSizeValue.ZERO, + 0, + ByteSizeValue.ZERO, + TimeValue.ZERO, + TimeValue.ZERO); + final AutoFollowMetadata autoFollowMetadata = new AutoFollowMetadata(Map.of("remote", pattern), Map.of(), Map.of()); + when(metaData.custom(AutoFollowMetadata.TYPE)).thenReturn(autoFollowMetadata); + + final int iterations = randomIntBetween(16384, 32768); // sufficiently large to exercise that we do not stack overflow + final AtomicInteger counter = new AtomicInteger(); + final CountDownLatch latch = new CountDownLatch(1); + final ExecutorService executor = Executors.newSingleThreadExecutor(); + try { + final AutoFollower autoFollower = new AutoFollower("remote", x -> {}, () -> clusterState, () -> 1, executor) { + + @Override + void getRemoteClusterState( + final String remoteCluster, + final long metadataVersion, + final BiConsumer handler) { + if (counter.incrementAndGet() > iterations) { + this.stop(); + latch.countDown(); + /* + * Do not call back the handler here; when we unlatch the test thread it will shut down the + * executor, which would lead to the execution of the callback facing a rejected execution + * exception (from the executor being shut down). 
+ */ + return; + } + handler.accept(null, new EsRejectedExecutionException()); + } + + @Override + void createAndFollow( + final Map headers, + final PutFollowAction.Request followRequest, + final Runnable successHandler, + final Consumer failureHandler) { + + } + + @Override + void updateAutoFollowMetadata( + final Function updateFunction, + final Consumer handler) { + + } + + }; + autoFollower.start(); + latch.await(); + } finally { + executor.shutdown(); + } + } + private static ClusterState createRemoteClusterState(String indexName, boolean enableSoftDeletes) { Settings.Builder indexSettings; indexSettings = settings(Version.CURRENT).put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), enableSoftDeletes); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java index 9f6850fe20fc7..aeabcb325b996 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.ccr.action; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; @@ -183,7 +184,7 @@ public void testIndexNotFound() throws InterruptedException { final CountDownLatch latch = new CountDownLatch(1); final AtomicReference reference = new AtomicReference<>(); final ShardChangesAction.TransportAction transportAction = node().injector().getInstance(ShardChangesAction.TransportAction.class); - transportAction.execute( + ActionTestUtils.execute(transportAction, null, new ShardChangesAction.Request(new ShardId(new Index("non-existent", "uuid"), 0), "uuid"), new ActionListener() { @Override @@ -208,7 +209,7 @@ public void testShardNotFound() throws InterruptedException { final CountDownLatch latch = new CountDownLatch(1); final AtomicReference reference = new AtomicReference<>(); final ShardChangesAction.TransportAction transportAction = node().injector().getInstance(ShardChangesAction.TransportAction.class); - transportAction.execute( + ActionTestUtils.execute(transportAction, null, new ShardChangesAction.Request(new ShardId(indexService.getMetaData().getIndex(), numberOfShards), "uuid"), new ActionListener() { @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DeleteLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DeleteLicenseAction.java index 41356285935cc..666d29d01b297 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DeleteLicenseAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DeleteLicenseAction.java @@ -7,7 +7,6 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.common.io.stream.Writeable; public class DeleteLicenseAction extends ActionType { @@ -15,11 +14,7 @@ public class DeleteLicenseAction extends ActionType { public static final String NAME = "cluster:admin/xpack/license/delete"; private DeleteLicenseAction() { - super(NAME); + super(NAME, AcknowledgedResponse::new); } - @Override - public Writeable.Reader getResponseReader() { - return AcknowledgedResponse::new; 
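testRepeatedFailures exercises the guard that opened this section: when the AutoFollower is called back on the thread that is already inside start(), it re-dispatches through the executor instead of recursing, so repeated synchronous failures can no longer overflow the stack. A self-contained toy of the same guard (this is not the AutoFollower code; the names and the attempt cap are invented):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;

// Toy model: a callback that always fails synchronously, on the calling thread.
// Detecting same-thread re-entry and forking turns what would be unbounded
// recursion into a bounded queue of executor tasks.
public class Retrier {

    private static final int MAX_ATTEMPTS = 100_000;

    private final ExecutorService executor = Executors.newSingleThreadExecutor();
    private final AtomicInteger attempts = new AtomicInteger();

    public void start() {
        final Thread thread = Thread.currentThread();
        attempt(failure -> {
            if (attempts.incrementAndGet() > MAX_ATTEMPTS) {
                executor.shutdown(); // give up; real code would surface the failure instead
                return;
            }
            if (thread == Thread.currentThread()) {
                executor.execute(this::start); // same thread: fork instead of recursing
            } else {
                start(); // called back on a different thread: safe to continue inline
            }
        });
    }

    private void attempt(Consumer<Exception> onFailure) {
        onFailure.accept(new RuntimeException("simulated repeated failure")); // always synchronous
    }

    public static void main(String[] args) {
        new Retrier().start(); // finishes without a StackOverflowError
    }
}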
- } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusAction.java index e93320c6a434a..34f316cd1be50 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusAction.java @@ -5,19 +5,14 @@ */ package org.elasticsearch.license; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class GetBasicStatusAction extends StreamableResponseActionType { +public class GetBasicStatusAction extends ActionType { public static final GetBasicStatusAction INSTANCE = new GetBasicStatusAction(); public static final String NAME = "cluster:admin/xpack/license/basic_status"; private GetBasicStatusAction() { - super(NAME); - } - - @Override - public GetBasicStatusResponse newResponse() { - return new GetBasicStatusResponse(); + super(NAME, GetBasicStatusResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusResponse.java index 21f8c51afa6ba..274d254133b85 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusResponse.java @@ -18,7 +18,9 @@ public class GetBasicStatusResponse extends ActionResponse implements ToXContent private boolean eligibleToStartBasic; - GetBasicStatusResponse() { + GetBasicStatusResponse(StreamInput in) throws IOException { + super(in); + eligibleToStartBasic = in.readBoolean(); } public GetBasicStatusResponse(boolean eligibleToStartBasic) { @@ -29,11 +31,6 @@ boolean isEligibleToStartBasic() { return eligibleToStartBasic; } - @Override - public void readFrom(StreamInput in) throws IOException { - eligibleToStartBasic = in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(eligibleToStartBasic); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetLicenseAction.java index 3584e89834c55..19cd6b64f05d5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetLicenseAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetLicenseAction.java @@ -5,19 +5,14 @@ */ package org.elasticsearch.license; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class GetLicenseAction extends StreamableResponseActionType { +public class GetLicenseAction extends ActionType { public static final GetLicenseAction INSTANCE = new GetLicenseAction(); public static final String NAME = "cluster:monitor/xpack/license/get"; private GetLicenseAction() { - super(NAME); - } - - @Override - public GetLicenseResponse newResponse() { - return new GetLicenseResponse(); + super(NAME, GetLicenseResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetLicenseResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetLicenseResponse.java index abfc1e06daa47..b84ba1647e42e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetLicenseResponse.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetLicenseResponse.java @@ -15,7 +15,11 @@ public class GetLicenseResponse extends ActionResponse { private License license; - GetLicenseResponse() { + GetLicenseResponse(StreamInput in) throws IOException { + super(in); + if (in.readBoolean()) { + license = License.readLicense(in); + } } GetLicenseResponse(License license) { @@ -26,14 +30,6 @@ public License license() { return license; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - if (in.readBoolean()) { - license = License.readLicense(in); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { if (license == null) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusAction.java index be00612ef62a0..5ad5a9f44c29c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusAction.java @@ -5,19 +5,14 @@ */ package org.elasticsearch.license; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class GetTrialStatusAction extends StreamableResponseActionType { +public class GetTrialStatusAction extends ActionType { public static final GetTrialStatusAction INSTANCE = new GetTrialStatusAction(); public static final String NAME = "cluster:admin/xpack/license/trial_status"; private GetTrialStatusAction() { - super(NAME); - } - - @Override - public GetTrialStatusResponse newResponse() { - return new GetTrialStatusResponse(); + super(NAME, GetTrialStatusResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusResponse.java index 1f6c28b3603eb..0c0c4914a3af0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusResponse.java @@ -18,7 +18,9 @@ public class GetTrialStatusResponse extends ActionResponse implements ToXContent private boolean eligibleToStartTrial; - GetTrialStatusResponse() { + GetTrialStatusResponse(StreamInput in) throws IOException { + super(in); + eligibleToStartTrial = in.readBoolean(); } public GetTrialStatusResponse(boolean eligibleToStartTrial) { @@ -29,11 +31,6 @@ boolean isEligibleToStartTrial() { return eligibleToStartTrial; } - @Override - public void readFrom(StreamInput in) throws IOException { - eligibleToStartTrial = in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(eligibleToStartTrial); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicAction.java index 3087a97b8aecd..8cd336619544d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicAction.java @@ -6,7 +6,6 @@ package org.elasticsearch.license; import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable; public class PostStartBasicAction extends ActionType { @@ -14,11 +13,6 @@ public class PostStartBasicAction extends ActionType { public static 
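The new GetLicenseResponse constructor above pairs a hand-rolled presence flag with the matching writeTo: reader and writer must agree on the flag, or everything after it on the stream mis-parses. StreamInput and StreamOutput also provide readOptionalString, readOptionalWriteable and friends that bundle this convention (XPackInfoResponse later in this diff uses readOptionalWriteable); a standalone sketch of the underlying pattern:

import java.io.IOException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

// Sketch of the presence-flag convention for a nullable wire field.
final class OptionalWire {

    static void writeOptionalValue(StreamOutput out, String value) throws IOException {
        if (value == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            out.writeString(value);
        }
    }

    static String readOptionalValue(StreamInput in) throws IOException {
        return in.readBoolean() ? in.readString() : null;
    }
}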
final String NAME = "cluster:admin/xpack/license/start_basic"; private PostStartBasicAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return PostStartBasicResponse::new; + super(NAME, PostStartBasicResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicRequest.java index 35867c1413e1b..5188655643066 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicRequest.java @@ -16,6 +16,13 @@ public class PostStartBasicRequest extends AcknowledgedRequest { +public class PostStartTrialAction extends ActionType { public static final PostStartTrialAction INSTANCE = new PostStartTrialAction(); public static final String NAME = "cluster:admin/xpack/license/start_trial"; private PostStartTrialAction() { - super(NAME); - } - - @Override - public PostStartTrialResponse newResponse() { - return new PostStartTrialResponse(); + super(NAME, PostStartTrialResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequest.java index 2420fb68de169..ca28bd75468cb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequest.java @@ -17,6 +17,14 @@ public class PostStartTrialRequest extends MasterNodeRequest acknowledgeMessages; private String acknowledgeMessage; - PostStartTrialResponse() { - } - - PostStartTrialResponse(Status status) { - this(status, Collections.emptyMap(), null); - } - - PostStartTrialResponse(Status status, Map acknowledgeMessages, String acknowledgeMessage) { - this.status = status; - this.acknowledgeMessages = acknowledgeMessages; - this.acknowledgeMessage = acknowledgeMessage; - } - - public Status getStatus() { - return status; - } - - @Override - public void readFrom(StreamInput in) throws IOException { + PostStartTrialResponse(StreamInput in) throws IOException { + super(in); status = in.readEnum(Status.class); acknowledgeMessage = in.readOptionalString(); int size = in.readVInt(); @@ -85,6 +68,20 @@ public void readFrom(StreamInput in) throws IOException { this.acknowledgeMessages = acknowledgeMessages; } + PostStartTrialResponse(Status status) { + this(status, Collections.emptyMap(), null); + } + + PostStartTrialResponse(Status status, Map acknowledgeMessages, String acknowledgeMessage) { + this.status = status; + this.acknowledgeMessages = acknowledgeMessages; + this.acknowledgeMessage = acknowledgeMessage; + } + + public Status getStatus() { + return status; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeEnum(status); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PutLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PutLicenseAction.java index 120a2ae505d40..b6610eec29013 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PutLicenseAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PutLicenseAction.java @@ -6,7 +6,6 @@ package org.elasticsearch.license; import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable; import 
org.elasticsearch.protocol.xpack.license.PutLicenseResponse; public class PutLicenseAction extends ActionType { @@ -15,11 +14,6 @@ public class PutLicenseAction extends ActionType { public static final String NAME = "cluster:admin/xpack/license/put"; private PutLicenseAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return PutLicenseResponse::new; + super(NAME, PutLicenseResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PutLicenseRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PutLicenseRequest.java index 6657adee41d36..a101b543ca2f3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PutLicenseRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PutLicenseRequest.java @@ -21,6 +21,12 @@ public class PutLicenseRequest extends AcknowledgedRequest { private License license; private boolean acknowledge = false; + public PutLicenseRequest(StreamInput in) throws IOException { + super(in); + license = License.readLicense(in); + acknowledge = in.readBoolean(); + } + public PutLicenseRequest() { } @@ -61,13 +67,6 @@ public boolean acknowledged() { return acknowledge; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - license = License.readLicense(in); - acknowledge = in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportDeleteLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportDeleteLicenseAction.java index 4209a99d3330c..beb3008f21b68 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportDeleteLicenseAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportDeleteLicenseAction.java @@ -34,7 +34,7 @@ public TransportDeleteLicenseAction(TransportService transportService, ClusterSe LicenseService licenseService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { super(DeleteLicenseAction.NAME, transportService, clusterService, threadPool, actionFilters, - indexNameExpressionResolver, DeleteLicenseRequest::new); + DeleteLicenseRequest::new, indexNameExpressionResolver); this.licenseService = licenseService; } @@ -48,11 +48,6 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { return new AcknowledgedResponse(in); } - @Override - protected AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected ClusterBlockException checkBlock(DeleteLicenseRequest request, ClusterState state) { return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetBasicStatusAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetBasicStatusAction.java index a22041c4a4125..2705173b39c7f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetBasicStatusAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetBasicStatusAction.java @@ -14,10 +14,13 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; 
+import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.IOException; + public class TransportGetBasicStatusAction extends TransportMasterNodeReadAction { @Inject @@ -34,8 +37,8 @@ protected String executor() { } @Override - protected GetBasicStatusResponse newResponse() { - return new GetBasicStatusResponse(); + protected GetBasicStatusResponse read(StreamInput in) throws IOException { + return new GetBasicStatusResponse(in); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetLicenseAction.java index cda8a57fdba2d..f39d9bd34158d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetLicenseAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetLicenseAction.java @@ -15,11 +15,14 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.protocol.xpack.license.GetLicenseRequest; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.IOException; + public class TransportGetLicenseAction extends TransportMasterNodeReadAction { private final LicenseService licenseService; @@ -39,8 +42,8 @@ protected String executor() { } @Override - protected GetLicenseResponse newResponse() { - return new GetLicenseResponse(); + protected GetLicenseResponse read(StreamInput in) throws IOException { + return new GetLicenseResponse(in); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetTrialStatusAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetTrialStatusAction.java index f31e6977663c4..47f2dc3303a7d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetTrialStatusAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetTrialStatusAction.java @@ -14,10 +14,13 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.IOException; + public class TransportGetTrialStatusAction extends TransportMasterNodeReadAction { @Inject @@ -33,8 +36,8 @@ protected String executor() { } @Override - protected GetTrialStatusResponse newResponse() { - return new GetTrialStatusResponse(); + protected GetTrialStatusResponse read(StreamInput in) throws IOException { + return new GetTrialStatusResponse(in); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportPostStartBasicAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportPostStartBasicAction.java index 5368936d2e9dc..5ccd7d0befe30 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportPostStartBasicAction.java +++ 
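Alongside the read(StreamInput) overrides, TransportDeleteLicenseAction earlier and the start-basic/start-trial actions just below also reorder their super call: in this change the base TransportMasterNodeAction constructor appears to take the request reader before the IndexNameExpressionResolver. Schematically (argument lists abridged, names illustrative):

// Streamable-era super call (abridged):
//   super(NAME, transportService, clusterService, threadPool, actionFilters,
//         indexNameExpressionResolver, ExampleRequest::new);
//
// Writeable-era super call (abridged):
//   super(NAME, transportService, clusterService, threadPool, actionFilters,
//         ExampleRequest::new, indexNameExpressionResolver);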
b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportPostStartBasicAction.java @@ -30,7 +30,7 @@ public TransportPostStartBasicAction(TransportService transportService, ClusterS LicenseService licenseService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { super(PostStartBasicAction.NAME, transportService, clusterService, threadPool, actionFilters, - indexNameExpressionResolver, PostStartBasicRequest::new); + PostStartBasicRequest::new, indexNameExpressionResolver); this.licenseService = licenseService; } @@ -39,11 +39,6 @@ protected String executor() { return ThreadPool.Names.SAME; } - @Override - protected PostStartBasicResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected PostStartBasicResponse read(StreamInput in) throws IOException { return new PostStartBasicResponse(in); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportPostStartTrialAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportPostStartTrialAction.java index 5a181eea4b620..020c0ff3a0f1c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportPostStartTrialAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportPostStartTrialAction.java @@ -14,10 +14,13 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.IOException; + public class TransportPostStartTrialAction extends TransportMasterNodeAction<PostStartTrialRequest, PostStartTrialResponse> { private final LicenseService licenseService; @@ -27,7 +30,7 @@ public TransportPostStartTrialAction(TransportService transportService, ClusterS LicenseService licenseService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { super(PostStartTrialAction.NAME, transportService, clusterService, threadPool, actionFilters, - indexNameExpressionResolver, PostStartTrialRequest::new); + PostStartTrialRequest::new, indexNameExpressionResolver); this.licenseService = licenseService; } @@ -37,8 +40,8 @@ protected String executor() { return ThreadPool.Names.SAME; } @Override - protected PostStartTrialResponse newResponse() { - return new PostStartTrialResponse(); + protected PostStartTrialResponse read(StreamInput in) throws IOException { + return new PostStartTrialResponse(in); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportPutLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportPutLicenseAction.java index a85a13427da93..eb53d0c4f9449 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportPutLicenseAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportPutLicenseAction.java @@ -31,8 +31,8 @@ public class TransportPutLicenseAction extends TransportMasterNodeAction<PutLicenseRequest, PutLicenseResponse> + * Spatial features are available for all license types except + * {@link OperationMode#MISSING} + * + * @return {@code true} as long as the license is valid. Otherwise + * {@code false}. 
+ */ + public boolean isSpatialAllowed() { + // status is volatile + Status localStatus = status; + // Should work on all active licenses + return localStatus.active; + } + public synchronized boolean isTrialLicense() { return status.mode == OperationMode.TRIAL; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoRequest.java index 41f066daf93d3..4deb63c060d14 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoRequest.java @@ -43,6 +43,17 @@ public static EnumSet toSet(String... categories) { public XPackInfoRequest() {} + public XPackInfoRequest(StreamInput in) throws IOException { + // NOTE: this does *not* call super, THIS IS A BUG + this.verbose = in.readBoolean(); + EnumSet categories = EnumSet.noneOf(Category.class); + int size = in.readVInt(); + for (int i = 0; i < size; i++) { + categories.add(Category.valueOf(in.readString())); + } + this.categories = categories; + } + public void setVerbose(boolean verbose) { this.verbose = verbose; } @@ -64,17 +75,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public void readFrom(StreamInput in) throws IOException { - this.verbose = in.readBoolean(); - EnumSet categories = EnumSet.noneOf(Category.class); - int size = in.readVInt(); - for (int i = 0; i < size; i++) { - categories.add(Category.valueOf(in.readString())); - } - this.categories = categories; - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(verbose); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java index 2aeed6b2d0fff..789ab376e8473 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java @@ -43,7 +43,12 @@ public class XPackInfoResponse extends ActionResponse implements ToXContentObjec @Nullable private LicenseInfo licenseInfo; @Nullable private FeatureSetsInfo featureSetsInfo; - public XPackInfoResponse() {} + public XPackInfoResponse(StreamInput in) throws IOException { + super(in); + this.buildInfo = in.readOptionalWriteable(BuildInfo::new); + this.licenseInfo = in.readOptionalWriteable(LicenseInfo::new); + this.featureSetsInfo = in.readOptionalWriteable(FeatureSetsInfo::new); + } public XPackInfoResponse(@Nullable BuildInfo buildInfo, @Nullable LicenseInfo licenseInfo, @Nullable FeatureSetsInfo featureSetsInfo) { this.buildInfo = buildInfo; @@ -80,13 +85,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(featureSetsInfo); } - @Override - public void readFrom(StreamInput in) throws IOException { - this.buildInfo = in.readOptionalWriteable(BuildInfo::new); - this.licenseInfo = in.readOptionalWriteable(LicenseInfo::new); - this.featureSetsInfo = in.readOptionalWriteable(FeatureSetsInfo::new); - } - @Override public boolean equals(Object other) { if (other == null || other.getClass() != getClass()) return false; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageRequest.java index 
83621a9ac3d41..61e3923b94ef2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageRequest.java @@ -7,9 +7,18 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeRequest; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; public class XPackUsageRequest extends MasterNodeRequest { + public XPackUsageRequest() {} + + public XPackUsageRequest(StreamInput in) throws IOException { + super(in); + } + @Override public ActionRequestValidationException validate() { return null; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/frozen/FreezeRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/frozen/FreezeRequest.java new file mode 100644 index 0000000000000..f4ce44e6b9799 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/frozen/FreezeRequest.java @@ -0,0 +1,127 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.frozen; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; +import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.util.CollectionUtils; + +import java.io.IOException; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +public class FreezeRequest extends AcknowledgedRequest + implements IndicesRequest.Replaceable { + private String[] indices; + private boolean freeze = true; + private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen(); + private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT; + + public FreezeRequest(String... 
indices) { + this.indices = indices; + } + + public FreezeRequest(StreamInput in) throws IOException { + super(in); + indicesOptions = IndicesOptions.readIndicesOptions(in); + indices = in.readStringArray(); + freeze = in.readBoolean(); + waitForActiveShards = ActiveShardCount.readFrom(in); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (CollectionUtils.isEmpty(indices)) { + validationException = addValidationError("index is missing", validationException); + } + return validationException; + } + + public FreezeRequest setFreeze(boolean freeze) { + this.freeze = freeze; + return this; + } + + public boolean freeze() { + return freeze; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + indicesOptions.writeIndicesOptions(out); + out.writeStringArray(indices); + out.writeBoolean(freeze); + waitForActiveShards.writeTo(out); + } + + /** + * @return the indices to be frozen or unfrozen + */ + @Override + public String[] indices() { + return indices; + } + + /** + * Specifies what type of requested indices to ignore and how to deal with wildcard expressions. + * For example, indices that don't exist. + * + * @return the current behaviour when it comes to index names and wildcard indices expressions + */ + @Override + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + /** + * Specifies what type of requested indices to ignore and how to deal with wildcard expressions. + * For example, indices that don't exist. + * + * @param indicesOptions the desired behaviour regarding indices to ignore and wildcard indices expressions + * @return the request itself + */ + public FreezeRequest indicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = indicesOptions; + return this; + } + + @Override + public IndicesRequest indices(String... indices) { + this.indices = indices; + return this; + } + + public ActiveShardCount waitForActiveShards() { + return waitForActiveShards; + } + + /** + * Sets the number of shard copies that should be active for indices opening to return. + * Defaults to {@link ActiveShardCount#DEFAULT}, which will wait for one shard copy + * (the primary) to become active. Set this value to {@link ActiveShardCount#ALL} to + * wait for all shards (primary and all replicas) to be active before returning. + * Otherwise, use {@link ActiveShardCount#from(int)} to set this value to any + * non-negative integer, up to the number of copies per shard (number of replicas + 1), + * to wait for the desired amount of shard copies to become active before returning. + * Indices opening will only wait up until the timeout value for the number of shard copies + * to be active before returning. Check {@link OpenIndexResponse#isShardsAcknowledged()} to + * determine if the requisite shard copies were all started before returning or timing out.
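To make the shard-count semantics just described concrete, a short usage sketch follows. It is illustrative only: the index names and the wrapper class are invented, but the fluent setters are exactly the ones this new FreezeRequest class defines.

import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.protocol.xpack.frozen.FreezeRequest;

public class FreezeRequestExample {
    public static FreezeRequest freezeExample() {
        // Freeze two (hypothetical) indices, wait for two active copies of every
        // shard (primary plus one replica) before returning, and skip rather than
        // fail on indices that do not exist.
        return new FreezeRequest("logs-2019.06", "logs-2019.07")
            .indicesOptions(IndicesOptions.lenientExpandOpen())
            .waitForActiveShards(ActiveShardCount.from(2));
    }

    public static FreezeRequest unfreezeExample() {
        // Unfreezing reuses the same request type with the freeze flag flipped.
        return new FreezeRequest("logs-2019.06").setFreeze(false);
    }
}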
+ * + * @param waitForActiveShards number of active shard copies to wait on + */ + public FreezeRequest waitForActiveShards(ActiveShardCount waitForActiveShards) { + this.waitForActiveShards = waitForActiveShards; + return this; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/frozen/FreezeResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/frozen/FreezeResponse.java new file mode 100644 index 0000000000000..bb2f2a141964e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/frozen/FreezeResponse.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.frozen; + +import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; + +public class FreezeResponse extends OpenIndexResponse { + public FreezeResponse(StreamInput in) throws IOException { + super(in); + } + + public FreezeResponse(boolean acknowledged, boolean shardsAcknowledged) { + super(acknowledged, shardsAcknowledged); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java index c1a682757d140..29c831438f906 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java @@ -121,6 +121,31 @@ public GraphExploreRequest types(String... 
types) { return this; } + public GraphExploreRequest(StreamInput in) throws IOException { + super(in); + + indices = in.readStringArray(); + indicesOptions = IndicesOptions.readIndicesOptions(in); + types = in.readStringArray(); + routing = in.readOptionalString(); + timeout = in.readOptionalTimeValue(); + sampleSize = in.readInt(); + sampleDiversityField = in.readOptionalString(); + maxDocsPerDiversityValue = in.readInt(); + + useSignificance = in.readBoolean(); + returnDetailedInfo = in.readBoolean(); + + int numHops = in.readInt(); + Hop parentHop = null; + for (int i = 0; i < numHops; i++) { + Hop hop = new Hop(parentHop); + hop.readFrom(in); + hops.add(hop); + parentHop = hop; + } + } + public String routing() { return this.routing; } @@ -162,33 +187,6 @@ public GraphExploreRequest timeout(String timeout) { return this; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - - indices = in.readStringArray(); - indicesOptions = IndicesOptions.readIndicesOptions(in); - types = in.readStringArray(); - routing = in.readOptionalString(); - timeout = in.readOptionalTimeValue(); - sampleSize = in.readInt(); - sampleDiversityField = in.readOptionalString(); - maxDocsPerDiversityValue = in.readInt(); - - useSignificance = in.readBoolean(); - returnDetailedInfo = in.readBoolean(); - - int numHops = in.readInt(); - Hop parentHop = null; - for (int i = 0; i < numHops; i++) { - Hop hop = new Hop(parentHop); - hop.readFrom(in); - hops.add(hop); - parentHop = hop; - } - - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java index f93e344046aac..dc6702d0009d9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java @@ -48,42 +48,10 @@ public class GraphExploreResponse extends ActionResponse implements ToXContentOb private boolean returnDetailedInfo; static final String RETURN_DETAILED_INFO_PARAM = "returnDetailedInfo"; - public GraphExploreResponse() { - } - - public GraphExploreResponse(long tookInMillis, boolean timedOut, ShardOperationFailedException[] shardFailures, - Map vertices, Map connections, boolean returnDetailedInfo) { - this.tookInMillis = tookInMillis; - this.timedOut = timedOut; - this.shardFailures = shardFailures; - this.vertices = vertices; - this.connections = connections; - this.returnDetailedInfo = returnDetailedInfo; - } - - - public TimeValue getTook() { - return new TimeValue(tookInMillis); - } + public GraphExploreResponse() {} - public long getTookInMillis() { - return tookInMillis; - } - - /** - * @return true if the time stated in {@link GraphExploreRequest#timeout(TimeValue)} was exceeded - * (not all hops may have been completed in this case) - */ - public boolean isTimedOut() { - return this.timedOut; - } - public ShardOperationFailedException[] getShardFailures() { - return shardFailures; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); + public GraphExploreResponse(StreamInput in) throws IOException { + super(in); tookInMillis = in.readVLong(); timedOut = in.readBoolean(); @@ -111,11 +79,41 @@ public void readFrom(StreamInput in) throws IOException { Connection e = new 
Connection(in, vertices); connections.put(e.getId(), e); } - + returnDetailedInfo = in.readBoolean(); } + public GraphExploreResponse(long tookInMillis, boolean timedOut, ShardOperationFailedException[] shardFailures, + Map vertices, Map connections, boolean returnDetailedInfo) { + this.tookInMillis = tookInMillis; + this.timedOut = timedOut; + this.shardFailures = shardFailures; + this.vertices = vertices; + this.connections = connections; + this.returnDetailedInfo = returnDetailedInfo; + } + + + public TimeValue getTook() { + return new TimeValue(tookInMillis); + } + + public long getTookInMillis() { + return tookInMillis; + } + + /** + * @return true if the time stated in {@link GraphExploreRequest#timeout(TimeValue)} was exceeded + * (not all hops may have been completed in this case) + */ + public boolean isTimedOut() { + return this.timedOut; + } + public ShardOperationFailedException[] getShardFailures() { + return shardFailures; + } + public Collection getConnections() { return connections.values(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/DeleteLicenseRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/DeleteLicenseRequest.java index 62353b093b5b5..77a110a59e7af 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/DeleteLicenseRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/DeleteLicenseRequest.java @@ -7,10 +7,19 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; public class DeleteLicenseRequest extends AcknowledgedRequest { + public DeleteLicenseRequest() {} + + public DeleteLicenseRequest(StreamInput in) throws IOException { + super(in); + } + @Override public ActionRequestValidationException validate() { return null; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseResponse.java index bb9635c8e9db8..694ac0c8fb491 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseResponse.java @@ -6,6 +6,7 @@ package org.elasticsearch.protocol.xpack.license; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; @@ -14,7 +15,8 @@ public class GetLicenseResponse extends ActionResponse { private String license; - GetLicenseResponse() { + public GetLicenseResponse(StreamInput in) throws IOException { + super(in); } public GetLicenseResponse(String license) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/PutLicenseRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/PutLicenseRequest.java index 342e6c296e7ed..ddf3cd0bffcb3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/PutLicenseRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/PutLicenseRequest.java @@ -7,12 +7,20 @@ import org.elasticsearch.action.ActionRequestValidationException; import 
org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; public class PutLicenseRequest extends AcknowledgedRequest { private String licenseDefinition; private boolean acknowledge = false; + public PutLicenseRequest(StreamInput in) throws IOException { + super(in); + + } + public PutLicenseRequest() { } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoRequest.java index 17afee59fa156..d09b134462731 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoRequest.java @@ -64,11 +64,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponse.java index 656163f073ebf..a698529f9eb57 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponse.java @@ -23,20 +23,15 @@ public class IndexUpgradeInfoResponse extends ActionResponse implements ToXConte private Map actions; - public IndexUpgradeInfoResponse() { - + public IndexUpgradeInfoResponse(StreamInput in) throws IOException { + super(in); + actions = in.readMap(StreamInput::readString, UpgradeActionRequired::readFromStream); } public IndexUpgradeInfoResponse(Map actions) { this.actions = actions; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - actions = in.readMap(StreamInput::readString, UpgradeActionRequired::readFromStream); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeMap(actions, StreamOutput::writeString, (out1, value) -> value.writeTo(out1)); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchRequest.java index 4a458b69a750d..6bd18e490bc2e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchRequest.java @@ -22,14 +22,18 @@ public class DeleteWatchRequest extends ActionRequest { private String id; private long version = Versions.MATCH_ANY; - public DeleteWatchRequest() { - this(null); - } + public DeleteWatchRequest() {} public DeleteWatchRequest(String id) { this.id = id; } + public DeleteWatchRequest(StreamInput in) throws IOException { + super(in); + id = in.readString(); + version = in.readLong(); + } + /** * @return The name of the watch to be deleted */ @@ -55,13 +59,6 @@ public ActionRequestValidationException validate() { return validationException; } - @Override - public void readFrom(StreamInput in) throws 
IOException { - super.readFrom(in); - id = in.readString(); - version = in.readLong(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponse.java index 93c20abb883ea..cb15d67c61191 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponse.java @@ -31,8 +31,7 @@ public class DeleteWatchResponse extends ActionResponse implements ToXContentObj private long version; private boolean found; - public DeleteWatchResponse() { - } + public DeleteWatchResponse() {} public DeleteWatchResponse(String id, long version, boolean found) { this.id = id; @@ -40,6 +39,13 @@ public DeleteWatchResponse(String id, long version, boolean found) { this.found = found; } + public DeleteWatchResponse(StreamInput in) throws IOException { + super(in); + id = in.readString(); + version = in.readVLong(); + found = in.readBoolean(); + } + public String getId() { return id; } @@ -79,14 +85,6 @@ public int hashCode() { return Objects.hash(id, version, found); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - id = in.readString(); - version = in.readVLong(); - found = in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(id); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchRequest.java index 7ddafa8c70720..fbdea0cf04067 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchRequest.java @@ -42,18 +42,7 @@ public final class PutWatchRequest extends ActionRequest { public PutWatchRequest() {} public PutWatchRequest(StreamInput in) throws IOException { - readFrom(in); - } - - public PutWatchRequest(String id, BytesReference source, XContentType xContentType) { - this.id = id; - this.source = source; - this.xContentType = xContentType; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); + super(in); id = in.readString(); source = in.readBytesReference(); active = in.readBoolean(); @@ -63,6 +52,12 @@ public void readFrom(StreamInput in) throws IOException { ifPrimaryTerm = in.readVLong(); } + public PutWatchRequest(String id, BytesReference source, XContentType xContentType) { + this.id = id; + this.source = source; + this.xContentType = xContentType; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponse.java index 27f2086d06182..7e116786ebd2b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponse.java @@ -36,7 +36,15 @@ public class PutWatchResponse extends ActionResponse implements ToXContentObject private long 
primaryTerm = SequenceNumbers.UNASSIGNED_PRIMARY_TERM; private boolean created; - public PutWatchResponse() { + public PutWatchResponse() {} + + public PutWatchResponse(StreamInput in) throws IOException { + super(in); + id = in.readString(); + version = in.readVLong(); + seqNo = in.readZLong(); + primaryTerm = in.readVLong(); + created = in.readBoolean(); } public PutWatchResponse(String id, long version, long seqNo, long primaryTerm, boolean created) { @@ -113,16 +121,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(created); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - id = in.readString(); - version = in.readVLong(); - seqNo = in.readZLong(); - primaryTerm = in.readVLong(); - created = in.readBoolean(); - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { return builder.startObject() diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index fec75af8f0f67..6384f42eaa2ee 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -30,7 +30,6 @@ import org.elasticsearch.plugins.NetworkPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.tasks.Task; -import org.elasticsearch.xpack.core.action.TransportFreezeIndexAction; import org.elasticsearch.xpack.core.action.XPackInfoAction; import org.elasticsearch.xpack.core.action.XPackUsageAction; import org.elasticsearch.xpack.core.beats.BeatsFeatureSetUsage; @@ -52,6 +51,8 @@ import org.elasticsearch.xpack.core.dataframe.transforms.TimeSyncConfig; import org.elasticsearch.xpack.core.deprecation.DeprecationInfoAction; import org.elasticsearch.xpack.core.flattened.FlattenedFeatureSetUsage; +import org.elasticsearch.xpack.core.frozen.FrozenIndicesFeatureSetUsage; +import org.elasticsearch.xpack.core.frozen.action.FreezeIndexAction; import org.elasticsearch.xpack.core.graph.GraphFeatureSetUsage; import org.elasticsearch.xpack.core.graph.action.GraphExploreAction; import org.elasticsearch.xpack.core.indexlifecycle.AllocateAction; @@ -191,6 +192,7 @@ import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.RoleMapperExpression; import org.elasticsearch.xpack.core.security.authz.privilege.ConditionalClusterPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.ConditionalClusterPrivileges; +import org.elasticsearch.xpack.core.spatial.SpatialFeatureSetUsage; import org.elasticsearch.xpack.core.sql.SqlFeatureSetUsage; import org.elasticsearch.xpack.core.ssl.action.GetCertificateInfoAction; import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeAction; @@ -207,6 +209,11 @@ import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchAction; import org.elasticsearch.xpack.core.watcher.transport.actions.service.WatcherServiceAction; import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsAction; +import org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecycleMetadata; +import org.elasticsearch.xpack.core.snapshotlifecycle.action.DeleteSnapshotLifecycleAction; +import org.elasticsearch.xpack.core.snapshotlifecycle.action.ExecuteSnapshotLifecycleAction; +import org.elasticsearch.xpack.core.snapshotlifecycle.action.GetSnapshotLifecycleAction; 
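The PutWatchResponse change just above shows the pattern this entire change set applies: the Streamable readFrom method is deleted and deserialization moves into a constructor taking StreamInput, so fields can be final and a half-initialized object never escapes. Below is a minimal, self-contained round-trip sketch of that contract; ExampleResponse is invented for illustration, while the stream helpers are the real Elasticsearch ones.

import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;

public final class ExampleResponse implements Writeable {
    private final String id;
    private final long version;

    public ExampleResponse(String id, long version) {
        this.id = id;
        this.version = version;
    }

    // Deserialization happens entirely in the constructor, mirroring
    // PutWatchResponse(StreamInput) above: read order must match writeTo.
    public ExampleResponse(StreamInput in) throws IOException {
        id = in.readString();
        version = in.readVLong();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(id);
        out.writeVLong(version);
    }

    // Round-trip check: serialize, then rebuild from the wire bytes.
    public static void main(String[] args) throws IOException {
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            new ExampleResponse("my_watch", 7).writeTo(out);
            try (StreamInput in = out.bytes().streamInput()) {
                ExampleResponse copy = new ExampleResponse(in);
                assert "my_watch".equals(copy.id) && copy.version == 7;
            }
        }
    }
}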
+import org.elasticsearch.xpack.core.snapshotlifecycle.action.PutSnapshotLifecycleAction; import java.util.ArrayList; import java.util.Arrays; @@ -372,7 +379,12 @@ public List> getClientActions() { RemoveIndexLifecyclePolicyAction.INSTANCE, MoveToStepAction.INSTANCE, RetryAction.INSTANCE, - TransportFreezeIndexAction.FreezeIndexAction.INSTANCE, + PutSnapshotLifecycleAction.INSTANCE, + GetSnapshotLifecycleAction.INSTANCE, + DeleteSnapshotLifecycleAction.INSTANCE, + ExecuteSnapshotLifecycleAction.INSTANCE, + // Freeze + FreezeIndexAction.INSTANCE, // Data Frame PutDataFrameTransformAction.INSTANCE, StartDataFrameTransformAction.INSTANCE, @@ -468,6 +480,9 @@ public List getNamedWriteables() { new NamedWriteableRegistry.Entry(MetaData.Custom.class, IndexLifecycleMetadata.TYPE, IndexLifecycleMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, IndexLifecycleMetadata.TYPE, IndexLifecycleMetadata.IndexLifecycleMetadataDiff::new), + new NamedWriteableRegistry.Entry(MetaData.Custom.class, SnapshotLifecycleMetadata.TYPE, SnapshotLifecycleMetadata::new), + new NamedWriteableRegistry.Entry(NamedDiff.class, SnapshotLifecycleMetadata.TYPE, + SnapshotLifecycleMetadata.SnapshotLifecycleMetadataDiff::new), // ILM - LifecycleTypes new NamedWriteableRegistry.Entry(LifecycleType.class, TimeseriesLifecycleType.TYPE, (in) -> TimeseriesLifecycleType.INSTANCE), @@ -491,7 +506,11 @@ public List getNamedWriteables() { // Vectors new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.VECTORS, VectorsFeatureSetUsage::new), // Voting Only Node - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.VOTING_ONLY, VotingOnlyNodeFeatureSetUsage::new) + new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.VOTING_ONLY, VotingOnlyNodeFeatureSetUsage::new), + // Frozen indices + new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.FROZEN_INDICES, FrozenIndicesFeatureSetUsage::new), + // Spatial + new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.SPATIAL, SpatialFeatureSetUsage::new) ); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java index 351606e321873..904db89bb542a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java @@ -43,6 +43,10 @@ public final class XPackField { public static final String VECTORS = "vectors"; /** Name constant for the voting-only-node feature. */ public static final String VOTING_ONLY = "voting_only"; + /** Name constant for the frozen index feature. */ + public static final String FROZEN_INDICES = "frozen_indices"; + /** Name constant for spatial features. 
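The NamedWriteableRegistry.Entry additions just above (snapshot lifecycle metadata, frozen-indices usage, spatial usage) all rely on the same dispatch mechanism: the wire format carries a name, and the registry maps a (category class, name) pair to a reader. Here is a minimal sketch of that round trip; ExampleUsage is an invented stand-in for entries like FrozenIndicesFeatureSetUsage.

import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;
import java.util.Collections;

public class NamedWriteableDemo {
    // Invented NamedWriteable used only to demonstrate registry dispatch.
    static final class ExampleUsage implements NamedWriteable {
        final String detail;
        ExampleUsage(String detail) { this.detail = detail; }
        ExampleUsage(StreamInput in) throws IOException { this.detail = in.readString(); }
        @Override public String getWriteableName() { return "example_usage"; }
        @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(detail); }
    }

    public static void main(String[] args) throws IOException {
        // The registry maps (category class, name) -> reader, which is exactly
        // what the Entry instances registered above provide.
        NamedWriteableRegistry registry = new NamedWriteableRegistry(Collections.singletonList(
            new NamedWriteableRegistry.Entry(NamedWriteable.class, "example_usage", ExampleUsage::new)));
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            out.writeNamedWriteable(new ExampleUsage("7 frozen indices"));
            try (StreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), registry)) {
                ExampleUsage copy = (ExampleUsage) in.readNamedWriteable(NamedWriteable.class);
                assert "7 frozen indices".equals(copy.detail);
            }
        }
    }
}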
*/ + public static final String SPATIAL = "spatial"; private XPackField() {} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java index a9d4e9f8b9bec..0604f409ccd1e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java @@ -9,9 +9,9 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; import org.elasticsearch.SpecialPermission; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilter; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.client.Client; @@ -34,10 +34,8 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.EngineFactory; -import org.elasticsearch.index.engine.FrozenEngine; import org.elasticsearch.license.LicenseService; import org.elasticsearch.license.LicensesMetaData; import org.elasticsearch.license.Licensing; @@ -57,7 +55,6 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xpack.core.action.ReloadAnalyzerAction; -import org.elasticsearch.xpack.core.action.TransportFreezeIndexAction; import org.elasticsearch.xpack.core.action.TransportReloadAnalyzersAction; import org.elasticsearch.xpack.core.action.TransportXPackInfoAction; import org.elasticsearch.xpack.core.action.TransportXPackUsageAction; @@ -65,7 +62,6 @@ import org.elasticsearch.xpack.core.action.XPackUsageAction; import org.elasticsearch.xpack.core.action.XPackUsageResponse; import org.elasticsearch.xpack.core.ml.MlMetadata; -import org.elasticsearch.xpack.core.rest.action.RestFreezeIndexAction; import org.elasticsearch.xpack.core.rest.action.RestReloadAnalyzersAction; import org.elasticsearch.xpack.core.rest.action.RestXPackInfoAction; import org.elasticsearch.xpack.core.rest.action.RestXPackUsageAction; @@ -249,8 +245,6 @@ public Collection createComponents(Client client, ClusterService cluster List> actions = new ArrayList<>(); actions.add(new ActionHandler<>(XPackInfoAction.INSTANCE, getInfoAction())); actions.add(new ActionHandler<>(XPackUsageAction.INSTANCE, getUsageAction())); - actions.add(new ActionHandler<>(TransportFreezeIndexAction.FreezeIndexAction.INSTANCE, - TransportFreezeIndexAction.class)); actions.addAll(licensing.getActions()); actions.add(new ActionHandler<>(ReloadAnalyzerAction.INSTANCE, TransportReloadAnalyzersAction.class)); return actions; @@ -288,7 +282,6 @@ public List getRestHandlers(Settings settings, RestController restC List handlers = new ArrayList<>(); handlers.add(new RestXPackInfoAction(settings, restController)); handlers.add(new RestXPackUsageAction(settings, restController)); - handlers.add(new RestFreezeIndexAction(settings, restController)); handlers.add(new RestReloadAnalyzersAction(settings, restController)); handlers.addAll(licensing.getRestHandlers(settings, restController, clusterSettings, indexScopedSettings, settingsFilter, indexNameExpressionResolver, nodesInCluster)); @@ -354,8 +347,6 @@ 
public Map getRepositories(Environment env, NamedXCo public Optional getEngineFactory(IndexSettings indexSettings) { if (indexSettings.getValue(SourceOnlySnapshotRepository.SOURCE_ONLY)) { return Optional.of(SourceOnlySnapshotRepository.getEngineFactory()); - } else if (indexSettings.getValue(FrozenEngine.INDEX_FROZEN)) { - return Optional.of(FrozenEngine::new); } return Optional.empty(); @@ -365,15 +356,6 @@ public Optional getEngineFactory(IndexSettings indexSettings) { public List> getSettings() { List> settings = super.getSettings(); settings.add(SourceOnlySnapshotRepository.SOURCE_ONLY); - settings.add(FrozenEngine.INDEX_FROZEN); return settings; } - - @Override - public void onIndexModule(IndexModule indexModule) { - if (FrozenEngine.INDEX_FROZEN.get(indexModule.getSettings())) { - indexModule.addSearchOperationListener(new FrozenEngine.ReacquireEngineSearcherListener()); - } - super.onIndexModule(indexModule); - } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java index ad11f103f33ea..618b4c86a9126 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java @@ -57,7 +57,7 @@ private XPackSettings() { /** Setting for enabling or disabling graph. Defaults to true. */ public static final Setting GRAPH_ENABLED = Setting.boolSetting("xpack.graph.enabled", true, Setting.Property.NodeScope); - /** Setting for enabling or disabling machine learning. Defaults to false. */ + /** Setting for enabling or disabling machine learning. Defaults to true. */ public static final Setting MACHINE_LEARNING_ENABLED = Setting.boolSetting("xpack.ml.enabled", true, Setting.Property.NodeScope); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/AbstractGetResourcesRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/AbstractGetResourcesRequest.java index 214d84bb6b8d1..7d86862d0cdd1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/AbstractGetResourcesRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/AbstractGetResourcesRequest.java @@ -66,14 +66,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - resourceId = in.readOptionalString(); - pageParams = in.readOptionalWriteable(PageParams::new); - allowNoResources = in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/AbstractGetResourcesResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/AbstractGetResourcesResponse.java index eb3cfa0b8f595..56e5428476419 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/AbstractGetResourcesResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/AbstractGetResourcesResponse.java @@ -39,12 +39,6 @@ public QueryPage getResources() { return resources; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - resources = new QueryPage<>(in, getReader()); - } - @Override public void writeTo(StreamOutput out) throws IOException { resources.writeTo(out); @@ -84,5 +78,6 @@
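For context on the hook XPackPlugin stops using above: getEngineFactory comes from the EnginePlugin interface, and returning a non-empty Optional binds every shard of a matching index to the supplied engine, which is how FrozenEngine was previously wired in. A hypothetical sketch follows; the plugin class and the index-name test are invented, and a real plugin would supply its own engine rather than InternalEngine.

import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.engine.EngineFactory;
import org.elasticsearch.index.engine.InternalEngine;
import org.elasticsearch.plugins.EnginePlugin;
import org.elasticsearch.plugins.Plugin;

import java.util.Optional;

public class ExampleEnginePlugin extends Plugin implements EnginePlugin {
    @Override
    public Optional<EngineFactory> getEngineFactory(IndexSettings indexSettings) {
        if (indexSettings.getIndex().getName().startsWith("archive-")) {
            return Optional.of(InternalEngine::new); // stand-in for a custom engine
        }
        return Optional.empty(); // defer to the default engine
    }
}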
public boolean equals(Object obj) { public final String toString() { return Strings.toString(this); } + protected abstract Reader getReader(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/AbstractTransportGetResourcesAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/AbstractTransportGetResourcesAction.java index c414397dc539a..41e2605d9dba3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/AbstractTransportGetResourcesAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/AbstractTransportGetResourcesAction.java @@ -42,7 +42,6 @@ import java.util.List; import java.util.Objects; import java.util.Set; -import java.util.function.Supplier; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; @@ -62,7 +61,8 @@ public abstract class AbstractTransportGetResourcesAction request, Client client, NamedXContentRegistry xContentRegistry) { + Writeable.Reader request, Client client, + NamedXContentRegistry xContentRegistry) { super(actionName, transportService, actionFilters, request); this.client = Objects.requireNonNull(client); this.xContentRegistry = Objects.requireNonNull(xContentRegistry); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/ReloadAnalyzerAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/ReloadAnalyzerAction.java index 7427d7f642906..c6b7fce59f9f2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/ReloadAnalyzerAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/ReloadAnalyzerAction.java @@ -5,19 +5,14 @@ */ package org.elasticsearch.xpack.core.action; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class ReloadAnalyzerAction extends StreamableResponseActionType { +public class ReloadAnalyzerAction extends ActionType { public static final ReloadAnalyzerAction INSTANCE = new ReloadAnalyzerAction(); public static final String NAME = "indices:admin/reload_analyzers"; private ReloadAnalyzerAction() { - super(NAME); - } - - @Override - public ReloadAnalyzersResponse newResponse() { - return new ReloadAnalyzersResponse(); + super(NAME, ReloadAnalyzersResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/ReloadAnalyzersRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/ReloadAnalyzersRequest.java index 8721abd3403a7..31e99ec9d8544 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/ReloadAnalyzersRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/ReloadAnalyzersRequest.java @@ -6,7 +6,9 @@ package org.elasticsearch.xpack.core.action; import org.elasticsearch.action.support.broadcast.BroadcastRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import java.io.IOException; import java.util.Arrays; import java.util.Objects; @@ -22,6 +24,10 @@ public ReloadAnalyzersRequest(String... 
indices) { super(indices); } + public ReloadAnalyzersRequest(StreamInput in) throws IOException { + super(in); + } + @Override public boolean equals(Object o) { if (this == o) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/ReloadAnalyzersResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/ReloadAnalyzersResponse.java index b996c98bb5843..fd84ba2d8df77 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/ReloadAnalyzersResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/ReloadAnalyzersResponse.java @@ -8,6 +8,9 @@ import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -15,12 +18,12 @@ import java.io.IOException; import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Map.Entry; +import java.util.Objects; import java.util.Set; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; @@ -31,14 +34,15 @@ public class ReloadAnalyzersResponse extends BroadcastResponse { private final Map reloadDetails; + private static final ParseField RELOAD_DETAILS_FIELD = new ParseField("reload_details"); private static final ParseField INDEX_FIELD = new ParseField("index"); private static final ParseField RELOADED_ANALYZERS_FIELD = new ParseField("reloaded_analyzers"); private static final ParseField RELOADED_NODE_IDS_FIELD = new ParseField("reloaded_node_ids"); - - public ReloadAnalyzersResponse() { - reloadDetails = Collections.emptyMap(); + public ReloadAnalyzersResponse(StreamInput in) throws IOException { + super(in); + this.reloadDetails = in.readMap(StreamInput::readString, ReloadDetails::new); } public ReloadAnalyzersResponse(int totalShards, int successfulShards, int failedShards, @@ -99,7 +103,30 @@ public static ReloadAnalyzersResponse fromXContent(XContentParser parser) { return PARSER.apply(parser, null); } - public static class ReloadDetails { + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeMap(reloadDetails, StreamOutput::writeString, (stream, details) -> details.writeTo(stream)); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ReloadAnalyzersResponse that = (ReloadAnalyzersResponse) o; + return Objects.equals(reloadDetails, that.reloadDetails); + } + + @Override + public int hashCode() { + return Objects.hash(reloadDetails); + } + + public static class ReloadDetails implements Writeable { private final String indexName; private final Set reloadedIndicesNodes; @@ -111,6 +138,19 @@ public ReloadDetails(String name, Set reloadedIndicesNodes, Set this.reloadedAnalyzers = reloadedAnalyzers; } + ReloadDetails(StreamInput in) throws IOException { + this.indexName = in.readString(); + this.reloadedIndicesNodes = new HashSet<>(in.readList(StreamInput::readString)); + 
this.reloadedAnalyzers = new HashSet<>(in.readList(StreamInput::readString)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(indexName); + out.writeStringCollection(reloadedIndicesNodes); + out.writeStringCollection(reloadedAnalyzers); + } + public String getIndexName() { return indexName; } @@ -128,5 +168,24 @@ void merge(ReloadResult other) { this.reloadedAnalyzers.addAll(other.reloadedSearchAnalyzers); this.reloadedIndicesNodes.add(other.nodeId); } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ReloadDetails that = (ReloadDetails) o; + return Objects.equals(indexName, that.indexName) + && Objects.equals(reloadedIndicesNodes, that.reloadedIndicesNodes) + && Objects.equals(reloadedAnalyzers, that.reloadedAnalyzers); + } + + @Override + public int hashCode() { + return Objects.hash(indexName, reloadedIndicesNodes, reloadedAnalyzers); + } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportReloadAnalyzersAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportReloadAnalyzersAction.java index 4ae7b45db731f..a6ff4478ac852 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportReloadAnalyzersAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportReloadAnalyzersAction.java @@ -24,7 +24,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.index.IndexService; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; @@ -60,9 +60,7 @@ public TransportReloadAnalyzersAction(ClusterService clusterService, TransportSe @Override protected ReloadResult readShardResult(StreamInput in) throws IOException { - ReloadResult reloadResult = new ReloadResult(); - reloadResult.readFrom(in); - return reloadResult; + return new ReloadResult(in); } @Override @@ -84,9 +82,7 @@ protected ReloadAnalyzersResponse newResponse(ReloadAnalyzersRequest request, in @Override protected ReloadAnalyzersRequest readRequestFrom(StreamInput in) throws IOException { - final ReloadAnalyzersRequest request = new ReloadAnalyzersRequest(); - request.readFrom(in); - return request; + return new ReloadAnalyzersRequest(in); } @Override @@ -97,7 +93,7 @@ protected ReloadResult shardOperation(ReloadAnalyzersRequest request, ShardRouti return new ReloadResult(shardRouting.index().getName(), shardRouting.currentNodeId(), reloadedSearchAnalyzers); } - static final class ReloadResult implements Streamable { + static final class ReloadResult implements Writeable { String index; String nodeId; List reloadedSearchAnalyzers; @@ -108,11 +104,7 @@ private ReloadResult(String index, String nodeId, List reloadedSearchAna this.reloadedSearchAnalyzers = reloadedSearchAnalyzers; } - private ReloadResult() { - } - - @Override - public void readFrom(StreamInput in) throws IOException { + private ReloadResult(StreamInput in) throws IOException { this.index = in.readString(); this.nodeId = in.readString(); this.reloadedSearchAnalyzers = in.readStringList(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportXPackInfoAction.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportXPackInfoAction.java index d252cec4b6afd..8910bf643cb7b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportXPackInfoAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportXPackInfoAction.java @@ -33,8 +33,7 @@ public class TransportXPackInfoAction extends HandledTransportAction { +public class XPackInfoAction extends ActionType { public static final String NAME = "cluster:monitor/xpack/info"; public static final XPackInfoAction INSTANCE = new XPackInfoAction(); public XPackInfoAction() { - super(NAME); - } - - @Override - public XPackInfoResponse newResponse() { - return new XPackInfoResponse(); + super(NAME, XPackInfoResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoFeatureAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoFeatureAction.java index 25cf63f3d45f7..2019256bb27e3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoFeatureAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoFeatureAction.java @@ -5,7 +5,7 @@ */ package org.elasticsearch.xpack.core.action; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.xpack.core.XPackField; import java.util.Arrays; @@ -18,7 +18,7 @@ * {@link XPackInfoAction} implementation iterates over the {@link #ALL} list of actions to form * the complete info result. */ -public class XPackInfoFeatureAction extends StreamableResponseActionType { +public class XPackInfoFeatureAction extends ActionType { private static final String BASE_NAME = "cluster:monitor/xpack/info/"; @@ -36,19 +36,16 @@ public class XPackInfoFeatureAction extends StreamableResponseActionType ALL = Arrays.asList( SECURITY, MONITORING, WATCHER, GRAPH, MACHINE_LEARNING, LOGSTASH, SQL, ROLLUP, INDEX_LIFECYCLE, CCR, DATA_FRAME, FLATTENED, - VECTORS, VOTING_ONLY + VECTORS, VOTING_ONLY, FROZEN_INDICES, SPATIAL ); private XPackInfoFeatureAction(String name) { - super(BASE_NAME + name); - } - - @Override - public XPackInfoFeatureResponse newResponse() { - return new XPackInfoFeatureResponse(); + super(BASE_NAME + name, XPackInfoFeatureResponse::new); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoFeatureResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoFeatureResponse.java index 2fedde9b48f88..9b9f86cde0f88 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoFeatureResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoFeatureResponse.java @@ -16,8 +16,9 @@ public class XPackInfoFeatureResponse extends ActionResponse { private FeatureSet info; - public XPackInfoFeatureResponse() { - // empty, for readFrom + public XPackInfoFeatureResponse(StreamInput in) throws IOException { + super(in); + info = new FeatureSet(in); } public XPackInfoFeatureResponse(FeatureSet info) { @@ -33,9 +34,4 @@ public void writeTo(StreamOutput out) throws IOException { info.writeTo(out); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - info = new FeatureSet(in); - } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageAction.java
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageAction.java index 96f9bb8310c08..28cbc15ef44ea 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageAction.java @@ -5,19 +5,14 @@ */ package org.elasticsearch.xpack.core.action; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class XPackUsageAction extends StreamableResponseActionType { +public class XPackUsageAction extends ActionType { public static final String NAME = "cluster:monitor/xpack/usage"; public static final XPackUsageAction INSTANCE = new XPackUsageAction(); public XPackUsageAction() { - super(NAME); - } - - @Override - public XPackUsageResponse newResponse() { - return new XPackUsageResponse(); + super(NAME, XPackUsageResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureAction.java index 3c2c5b5b3e310..e5a9eca8f1fc5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureAction.java @@ -5,7 +5,7 @@ */ package org.elasticsearch.xpack.core.action; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.xpack.core.XPackField; import java.util.Arrays; @@ -18,7 +18,7 @@ * {@link XPackUsageAction} implementation iterates over the {@link #ALL} list of actions to form * the complete usage result.
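A simplified model of the fan-out this javadoc describes may help: one action per feature, all executed, results concatenated into the final usage response. The sketch below is hypothetical; FeatureUsage and the executor function stand in for XPackFeatureSet.Usage and the transport-layer call, and only the iterate-and-collect shape reflects the real implementation.

import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;

public class UsageAggregationSketch {
    public static final class FeatureUsage {
        public final String feature;
        public final boolean available;
        public final boolean enabled;

        public FeatureUsage(String feature, boolean available, boolean enabled) {
            this.feature = feature;
            this.available = available;
            this.enabled = enabled;
        }
    }

    // Mirrors iterating XPackUsageFeatureAction.ALL: execute one per-feature
    // action for each registered feature and concatenate the results.
    public static List<FeatureUsage> collectUsages(List<String> featureNames,
                                                   Function<String, FeatureUsage> executeFeatureAction) {
        List<FeatureUsage> usages = new ArrayList<>(featureNames.size());
        for (String name : featureNames) {
            usages.add(executeFeatureAction.apply(name));
        }
        return usages;
    }
}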
*/ -public class XPackUsageFeatureAction extends StreamableResponseActionType { +public class XPackUsageFeatureAction extends ActionType { private static final String BASE_NAME = "cluster:monitor/xpack/usage/"; @@ -36,19 +36,16 @@ public class XPackUsageFeatureAction extends StreamableResponseActionType ALL = Arrays.asList( SECURITY, MONITORING, WATCHER, GRAPH, MACHINE_LEARNING, LOGSTASH, SQL, ROLLUP, INDEX_LIFECYCLE, CCR, DATA_FRAME, FLATTENED, - VECTORS, VOTING_ONLY + VECTORS, VOTING_ONLY, FROZEN_INDICES, SPATIAL ); private XPackUsageFeatureAction(String name) { - super(BASE_NAME + name); - } - - @Override - public XPackUsageFeatureResponse newResponse() { - return new XPackUsageFeatureResponse(); + super(BASE_NAME + name, XPackUsageFeatureResponse::new); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureResponse.java index 96fec7f32a422..9913df74cc314 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureResponse.java @@ -16,8 +16,9 @@ public class XPackUsageFeatureResponse extends ActionResponse { private XPackFeatureSet.Usage usage; - public XPackUsageFeatureResponse() { - // empty, for readFrom + public XPackUsageFeatureResponse(StreamInput in) throws IOException { + super(in); + usage = in.readNamedWriteable(XPackFeatureSet.Usage.class); } public XPackUsageFeatureResponse(XPackFeatureSet.Usage usage) { @@ -33,9 +34,4 @@ public void writeTo(StreamOutput out) throws IOException { out.writeNamedWriteable(usage); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - usage = in.readNamedWriteable(XPackFeatureSet.Usage.class); - } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureTransportAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureTransportAction.java index baaae6f622199..bfa24c4cc4c38 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureTransportAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureTransportAction.java @@ -11,17 +11,20 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.protocol.xpack.XPackUsageRequest; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.IOException; + public abstract class XPackUsageFeatureTransportAction extends TransportMasterNodeAction { public XPackUsageFeatureTransportAction(String name, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { super(name, transportService, clusterService, threadPool, - actionFilters, indexNameExpressionResolver, XPackUsageRequest::new); + actionFilters, XPackUsageRequest::new, indexNameExpressionResolver); } @Override @@ -30,8 +33,8 @@ protected String executor() { } @Override - protected XPackUsageFeatureResponse newResponse() { - return new
XPackUsageFeatureResponse(); + protected XPackUsageFeatureResponse read(StreamInput in) throws IOException { + return new XPackUsageFeatureResponse(in); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageResponse.java index 66d5d7b095bf3..b39ca2c8ed8fa 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageResponse.java @@ -18,7 +18,14 @@ public class XPackUsageResponse extends ActionResponse { private List usages; - public XPackUsageResponse() {} + public XPackUsageResponse(StreamInput in) throws IOException { + super(in); + int size = in.readVInt(); + usages = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + usages.add(in.readNamedWriteable(XPackFeatureSet.Usage.class)); + } + } public XPackUsageResponse(List usages) { this.usages = usages; @@ -36,13 +43,4 @@ public void writeTo(StreamOutput out) throws IOException { } } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - int size = in.readVInt(); - usages = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - usages.add(in.readNamedWriteable(XPackFeatureSet.Usage.class)); - } } -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/CcrStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/CcrStatsAction.java index 67cdcddcc9c85..8ba8f09ff8fe9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/CcrStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/CcrStatsAction.java @@ -8,11 +8,10 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.ccr.AutoFollowStats; @@ -26,12 +25,7 @@ public class CcrStatsAction extends ActionType { public static final CcrStatsAction INSTANCE = new CcrStatsAction(); private CcrStatsAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return Response::new; + super(NAME, CcrStatsAction.Response::new); } public static class Request extends MasterNodeRequest { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/DeleteAutoFollowPatternAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/DeleteAutoFollowPatternAction.java index 2020192d90fd2..95d74b19315b7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/DeleteAutoFollowPatternAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/DeleteAutoFollowPatternAction.java @@ -5,13 +5,12 @@ */ package org.elasticsearch.xpack.core.ccr.action; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequestValidationException; +import 
org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import java.io.IOException; import java.util.Objects; @@ -24,12 +23,7 @@ public class DeleteAutoFollowPatternAction extends ActionType getResponseReader() { - return AcknowledgedResponse::new; + super(NAME, AcknowledgedResponse::new); } public static class Request extends AcknowledgedRequest { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowInfoAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowInfoAction.java index 92cf64a52a046..b0c804c4e32a7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowInfoAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowInfoAction.java @@ -29,12 +29,7 @@ public class FollowInfoAction extends ActionType { public static final FollowInfoAction INSTANCE = new FollowInfoAction(); private FollowInfoAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return Response::new; + super(NAME, FollowInfoAction.Response::new); } public static class Request extends MasterNodeReadRequest { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowStatsAction.java index 44d27777d9fa7..b855e4838783c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowStatsAction.java @@ -36,12 +36,7 @@ public class FollowStatsAction extends ActionType getResponseReader() { - return StatsResponses::new; + super(NAME, FollowStatsAction.StatsResponses::new); } public static class StatsResponses extends BaseTasksResponse implements ToXContentObject { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ForgetFollowerAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ForgetFollowerAction.java index a86ac8abf569f..1d4792aa44778 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ForgetFollowerAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ForgetFollowerAction.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.core.ccr.action; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.broadcast.BroadcastRequest; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.ParseField; @@ -19,18 +19,13 @@ import java.io.IOException; import java.util.Objects; -public class ForgetFollowerAction extends StreamableResponseActionType { +public class ForgetFollowerAction extends ActionType { public static final String NAME = "indices:admin/xpack/ccr/forget_follower"; public static final ForgetFollowerAction INSTANCE = new ForgetFollowerAction(); private ForgetFollowerAction() { - super(NAME); - } - - @Override - public BroadcastResponse newResponse() { - return 
new BroadcastResponse(); + super(NAME, BroadcastResponse::new); } /** @@ -115,8 +110,13 @@ public String leaderIndex() { return leaderIndex; } - public Request() { - + public Request(StreamInput in) throws IOException { + super(in); + followerCluster = in.readString(); + leaderIndex = in.readString(); + leaderRemoteCluster = in.readString(); + followerIndex = in.readString(); + followerIndexUUID = in.readString(); } /** @@ -142,15 +142,6 @@ public Request( this.followerIndexUUID = Objects.requireNonNull(followerIndexUUID); } - public Request(final StreamInput in) throws IOException { - super.readFrom(in); - followerCluster = in.readString(); - leaderIndex = in.readString(); - leaderRemoteCluster = in.readString(); - followerIndex = in.readString(); - followerIndexUUID = in.readString(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/GetAutoFollowPatternAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/GetAutoFollowPatternAction.java index 96ae218c96de9..39f1cc20fa87a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/GetAutoFollowPatternAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/GetAutoFollowPatternAction.java @@ -6,13 +6,12 @@ package org.elasticsearch.xpack.core.ccr.action; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.MasterNodeReadRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata.AutoFollowPattern; @@ -27,12 +26,7 @@ public class GetAutoFollowPatternAction extends ActionType getResponseReader() { - return Response::new; + super(NAME, GetAutoFollowPatternAction.Response::new); } public static class Request extends MasterNodeReadRequest { @@ -93,7 +87,7 @@ public Map getAutoFollowPatterns() { } public Response(StreamInput in) throws IOException { - super.readFrom(in); + super(in); autoFollowPatterns = in.readMap(StreamInput::readString, AutoFollowPattern::readFrom); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PauseFollowAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PauseFollowAction.java index 85224d09001ae..ee43cb5803fc8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PauseFollowAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PauseFollowAction.java @@ -6,13 +6,12 @@ package org.elasticsearch.xpack.core.ccr.action; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import 
java.io.IOException; import java.util.Objects; @@ -23,12 +22,7 @@ public class PauseFollowAction extends ActionType { public static final String NAME = "cluster:admin/xpack/ccr/pause_follow"; private PauseFollowAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return AcknowledgedResponse::new; + super(NAME, AcknowledgedResponse::new); } public static class Request extends MasterNodeRequest { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java index 3d6beb67f3de4..a76df2aeb92ba 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java @@ -5,13 +5,12 @@ */ package org.elasticsearch.xpack.core.ccr.action; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -33,12 +32,7 @@ public class PutAutoFollowPatternAction extends ActionType private static final int MAX_NAME_BYTES = 255; private PutAutoFollowPatternAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return AcknowledgedResponse::new; + super(NAME, AcknowledgedResponse::new); } public static class Request extends AcknowledgedRequest implements ToXContentObject { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java index 11af0344d2567..7425a617a32c2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java @@ -6,9 +6,9 @@ package org.elasticsearch.xpack.core.ccr.action; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; @@ -16,7 +16,6 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -33,12 +32,7 @@ public final class PutFollowAction extends ActionType public static final String NAME = "indices:admin/xpack/ccr/put_follow"; private PutFollowAction() { - super(NAME); - } - - @Override - public Writeable.Reader 
getResponseReader() { - return Response::new; + super(NAME, PutFollowAction.Response::new); } public static class Request extends AcknowledgedRequest implements IndicesRequest, ToXContentObject { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ResumeFollowAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ResumeFollowAction.java index a55644facd2a9..b0a42bf9a4c9c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ResumeFollowAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ResumeFollowAction.java @@ -6,13 +6,12 @@ package org.elasticsearch.xpack.core.ccr.action; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -29,12 +28,7 @@ public final class ResumeFollowAction extends ActionType { public static final String NAME = "cluster:admin/xpack/ccr/resume_follow"; private ResumeFollowAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return AcknowledgedResponse::new; + super(NAME, AcknowledgedResponse::new); } public static class Request extends MasterNodeRequest implements ToXContentObject { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/UnfollowAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/UnfollowAction.java index 2b879d7210327..a2be1090379c3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/UnfollowAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/UnfollowAction.java @@ -6,15 +6,14 @@ package org.elasticsearch.xpack.core.ccr.action; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import java.io.IOException; @@ -26,12 +25,7 @@ public class UnfollowAction extends ActionType { public static final String NAME = "indices:admin/xpack/ccr/unfollow"; private UnfollowAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return AcknowledgedResponse::new; + super(NAME, AcknowledgedResponse::new); } public static class Request extends AcknowledgedRequest implements IndicesRequest { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameField.java index 2dedef82eb3cf..9f526dd92d2f9 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameField.java @@ -26,12 +26,14 @@ public final class DataFrameField { public static final ParseField INDEX_DOC_TYPE = new ParseField("doc_type"); public static final ParseField SOURCE = new ParseField("source"); public static final ParseField DESTINATION = new ParseField("dest"); + public static final ParseField FREQUENCY = new ParseField("frequency"); public static final ParseField FORCE = new ParseField("force"); public static final ParseField MAX_PAGE_SEARCH_SIZE = new ParseField("max_page_search_size"); public static final ParseField FIELD = new ParseField("field"); public static final ParseField SYNC = new ParseField("sync"); public static final ParseField TIME_BASED_SYNC = new ParseField("time"); public static final ParseField DELAY = new ParseField("delay"); + public static final ParseField DEFER_VALIDATION = new ParseField("defer_validation"); public static final ParseField ALLOW_NO_MATCH = new ParseField("allow_no_match"); /** diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameMessages.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameMessages.java index 5d6cf54c44d14..6ea1134299d5a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameMessages.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameMessages.java @@ -32,6 +32,9 @@ public class DataFrameMessages { public static final String DATA_FRAME_FAILED_TO_PERSIST_STATS = "Failed to persist data frame statistics for transform [{0}]"; public static final String DATA_FRAME_UNKNOWN_TRANSFORM_STATS = "Statistics for transform [{0}] could not be found"; + public static final String DATA_FRAME_CANNOT_STOP_FAILED_TRANSFORM = + "Unable to stop data frame transform [{0}] as it is in a failed state with reason [{1}]." 
+ + " Use force stop to stop the data frame transform."; public static final String FAILED_TO_CREATE_DESTINATION_INDEX = "Could not create destination index [{0}] for transform [{1}]"; public static final String FAILED_TO_LOAD_TRANSFORM_CONFIGURATION = "Failed to load data frame transform configuration for transform [{0}]"; @@ -40,7 +43,9 @@ public class DataFrameMessages { public static final String FAILED_TO_PARSE_TRANSFORM_STATISTICS_CONFIGURATION = "Failed to parse transform statistics for data frame transform [{0}]"; public static final String FAILED_TO_LOAD_TRANSFORM_CHECKPOINT = - "Failed to load data frame transform configuration for transform [{0}]"; + "Failed to load data frame transform checkpoint for transform [{0}]"; + public static final String FAILED_TO_LOAD_TRANSFORM_STATE = + "Failed to load data frame transform state for transform [{0}]"; public static final String DATA_FRAME_TRANSFORM_CONFIGURATION_NO_TRANSFORM = "Data frame transform configuration must specify exactly 1 function"; public static final String DATA_FRAME_TRANSFORM_CONFIGURATION_PIVOT_NO_GROUP_BY = diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformAction.java index 5f6d7dccdf2d0..a3bd257520bb5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformAction.java @@ -5,13 +5,13 @@ */ package org.elasticsearch.xpack.core.dataframe.action; -import org.elasticsearch.action.ActionType; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.utils.ExceptionsHelper; @@ -24,34 +24,43 @@ public class DeleteDataFrameTransformAction extends ActionType getResponseReader() { - return AcknowledgedResponse::new; + super(NAME, AcknowledgedResponse::new); } public static class Request extends MasterNodeRequest { - private String id; + private final String id; + private final boolean force; - public Request(String id) { + public Request(String id, boolean force) { this.id = ExceptionsHelper.requireNonNull(id, DataFrameField.ID.getPreferredName()); + this.force = force; } public Request(StreamInput in) throws IOException { super(in); id = in.readString(); + if (in.getVersion().onOrAfter(Version.CURRENT)) { + force = in.readBoolean(); + } else { + force = false; + } } public String getId() { return id; } + public boolean isForce() { + return force; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(id); + if (out.getVersion().onOrAfter(Version.CURRENT)) { + out.writeBoolean(force); + } } @Override @@ -61,7 +70,7 @@ public ActionRequestValidationException validate() { @Override public int hashCode() { - return Objects.hash(id); + return Objects.hash(id, force); } @Override @@ -74,7 +83,7 @@ public boolean equals(Object obj) { 
return false; } Request other = (Request) obj; - return Objects.equals(id, other.id); + return Objects.equals(id, other.id) && force == other.force; } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsAction.java index afeaaaf4b2d93..93e9dcaf35567 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsAction.java @@ -37,12 +37,7 @@ public class GetDataFrameTransformsAction extends ActionType getResponseReader() { - return Response::new; + super(NAME, GetDataFrameTransformsAction.Response::new); } public static class Request extends AbstractGetResourcesRequest { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsStatsAction.java index 8c4438e09db2c..ae451559b3d50 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsStatsAction.java @@ -8,8 +8,8 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.support.tasks.BaseTasksRequest; import org.elasticsearch.action.support.tasks.BaseTasksResponse; @@ -17,7 +17,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.tasks.Task; @@ -39,12 +38,7 @@ public class GetDataFrameTransformsStatsAction extends ActionType getResponseReader() { - return Response::new; + super(NAME, GetDataFrameTransformsStatsAction.Response::new); } public static class Request extends BaseTasksRequest { @@ -200,11 +194,6 @@ public void writeTo(StreamOutput out) throws IOException { } } - @Override - public void readFrom(StreamInput in) { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformAction.java index 904a45042712e..b4d885d70f9d2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformAction.java @@ -7,15 +7,14 @@ package org.elasticsearch.xpack.core.dataframe.action; import org.elasticsearch.Version; -import 
org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; @@ -42,12 +41,7 @@ public class PreviewDataFrameTransformAction extends ActionType getResponseReader() { - return Response::new; + super(NAME, PreviewDataFrameTransformAction.Response::new); } public static class Request extends AcknowledgedRequest implements ToXContentObject { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformAction.java index 00873d7630754..7b9db2a4d3a59 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformAction.java @@ -6,21 +6,20 @@ package org.elasticsearch.xpack.core.dataframe.action; -import org.elasticsearch.action.ActionType; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.indices.InvalidIndexNameException; import org.elasticsearch.xpack.core.dataframe.DataFrameField; +import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.core.dataframe.utils.DataFrameStrings; -import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; import java.io.IOException; import java.util.Locale; @@ -34,32 +33,41 @@ public class PutDataFrameTransformAction extends ActionType getResponseReader() { - return AcknowledgedResponse::new; + private PutDataFrameTransformAction() { + super(NAME, AcknowledgedResponse::new); } - public static class Request extends AcknowledgedRequest implements ToXContentObject { + public static class Request extends AcknowledgedRequest { private final DataFrameTransformConfig config; + private final boolean deferValidation; - public Request(DataFrameTransformConfig config) { + public Request(DataFrameTransformConfig config, boolean deferValidation) { this.config = config; + this.deferValidation = deferValidation; } public Request(StreamInput in) throws IOException { super(in); this.config = new DataFrameTransformConfig(in); + if 
(in.getVersion().onOrAfter(Version.CURRENT)) { + this.deferValidation = in.readBoolean(); + } else { + this.deferValidation = false; + } } - public static Request fromXContent(final XContentParser parser, final String id) throws IOException { - return new Request(DataFrameTransformConfig.fromXContent(parser, id, false)); + public static Request fromXContent(final XContentParser parser, final String id, final boolean deferValidation) { + return new Request(DataFrameTransformConfig.fromXContent(parser, id, false), deferValidation); } + /** + * More complex validations with how {@link DataFrameTransformConfig#getDestination()} and + * {@link DataFrameTransformConfig#getSource()} relate are done in the transport handler. + */ @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; @@ -93,27 +101,42 @@ public ActionRequestValidationException validate() { DataFrameMessages.getMessage(DataFrameMessages.ID_TOO_LONG, DataFrameStrings.ID_LENGTH_LIMIT), validationException); } - return validationException; - } + TimeValue frequency = config.getFrequency(); + if (frequency != null) { + if (frequency.compareTo(MIN_FREQUENCY) < 0) { + validationException = addValidationError( + "minimum permitted [" + DataFrameField.FREQUENCY + "] is [" + MIN_FREQUENCY.getStringRep() + "]", + validationException); + } else if (frequency.compareTo(MAX_FREQUENCY) > 0) { + validationException = addValidationError( + "highest permitted [" + DataFrameField.FREQUENCY + "] is [" + MAX_FREQUENCY.getStringRep() + "]", + validationException); + } + } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return this.config.toXContent(builder, params); + return validationException; } public DataFrameTransformConfig getConfig() { return config; } + public boolean isDeferValidation() { + return deferValidation; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); this.config.writeTo(out); + if (out.getVersion().onOrAfter(Version.CURRENT)) { + out.writeBoolean(this.deferValidation); + } } @Override public int hashCode() { - return Objects.hash(config); + return Objects.hash(config, deferValidation); } @Override @@ -125,7 +148,7 @@ public boolean equals(Object obj) { return false; } Request other = (Request) obj; - return Objects.equals(config, other.config); + return Objects.equals(config, other.config) && this.deferValidation == other.deferValidation; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformAction.java index 2854232c543ec..7473193e4390e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformAction.java @@ -6,13 +6,12 @@ package org.elasticsearch.xpack.core.dataframe.action; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.tasks.BaseTasksResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import 
org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.dataframe.DataFrameField; @@ -28,12 +27,7 @@ public class StartDataFrameTransformAction extends ActionType getResponseReader() { - return Response::new; + super(NAME, StartDataFrameTransformAction.Response::new); } public static class Request extends AcknowledgedRequest { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskAction.java index 6d6bf8e7db70c..e1ebe4eb0ab3d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskAction.java @@ -6,13 +6,12 @@ package org.elasticsearch.xpack.core.dataframe.action; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.tasks.BaseTasksRequest; import org.elasticsearch.action.support.tasks.BaseTasksResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.tasks.Task; @@ -29,12 +28,7 @@ public class StartDataFrameTransformTaskAction extends ActionType getResponseReader() { - return Response::new; + super(NAME, StartDataFrameTransformTaskAction.Response::new); } public static class Request extends BaseTasksRequest { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java index de62d59c5e0c1..eef244551a33c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java @@ -40,12 +40,7 @@ public class StopDataFrameTransformAction extends ActionType getResponseReader() { - return Response::new; + super(NAME, StopDataFrameTransformAction.Response::new); } public static class Request extends BaseTasksRequest { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerPosition.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerPosition.java new file mode 100644 index 0000000000000..84d3a655593d6 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerPosition.java @@ -0,0 +1,118 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.dataframe.transforms; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +public class DataFrameIndexerPosition implements Writeable, ToXContentObject { + public static final String NAME = "data_frame/indexer_position"; + + public static final ParseField INDEXER_POSITION = new ParseField("indexer_position"); + public static final ParseField BUCKET_POSITION = new ParseField("bucket_position"); + + private final Map<String, Object> indexerPosition; + private final Map<String, Object> bucketPosition; + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser<DataFrameIndexerPosition, Void> PARSER = new ConstructingObjectParser<>(NAME, + true, + args -> new DataFrameIndexerPosition((Map<String, Object>) args[0], (Map<String, Object>) args[1])); + + static { + PARSER.declareField(optionalConstructorArg(), XContentParser::mapOrdered, INDEXER_POSITION, ValueType.OBJECT); + PARSER.declareField(optionalConstructorArg(), XContentParser::mapOrdered, BUCKET_POSITION, ValueType.OBJECT); + } + + public DataFrameIndexerPosition(Map<String, Object> indexerPosition, Map<String, Object> bucketPosition) { + this.indexerPosition = indexerPosition == null ? null : Collections.unmodifiableMap(indexerPosition); + this.bucketPosition = bucketPosition == null ? null : Collections.unmodifiableMap(bucketPosition); + } + + public DataFrameIndexerPosition(StreamInput in) throws IOException { + Map<String, Object> position = in.readMap(); + indexerPosition = position == null ? null : Collections.unmodifiableMap(position); + position = in.readMap(); + bucketPosition = position == null ? null : Collections.unmodifiableMap(position); + } + + public Map<String, Object> getIndexerPosition() { + return indexerPosition; + } + + public Map<String, Object> getBucketsPosition() { + return bucketPosition; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(indexerPosition); + out.writeMap(bucketPosition); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (indexerPosition != null) { + builder.field(INDEXER_POSITION.getPreferredName(), indexerPosition); + } + if (bucketPosition != null) { + builder.field(BUCKET_POSITION.getPreferredName(), bucketPosition); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + DataFrameIndexerPosition that = (DataFrameIndexerPosition) other; + + return Objects.equals(this.indexerPosition, that.indexerPosition) && + Objects.equals(this.bucketPosition, that.bucketPosition); + } + + @Override + public int hashCode() { + return Objects.hash(indexerPosition, bucketPosition); + } + + @Override + public String toString() { + return Strings.toString(this); + } + + public static DataFrameIndexerPosition fromXContent(XContentParser parser) { + try { + return PARSER.parse(parser, null); + } catch (IOException e) { + throw new RuntimeException(e); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransform.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransform.java index e620e4f859543..f7c14b0439ad3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransform.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransform.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -24,25 +25,30 @@ public class DataFrameTransform extends AbstractDiffable<DataFrameTransform> imp public static final String NAME = DataFrameField.TASK_NAME; public static final ParseField VERSION = new ParseField(DataFrameField.VERSION); + public static final ParseField FREQUENCY = DataFrameField.FREQUENCY; private final String transformId; private final Version version; + private final TimeValue frequency; - public static final ConstructingObjectParser<DataFrameTransform, Void> PARSER = new ConstructingObjectParser<>(NAME, - a -> new DataFrameTransform((String) a[0], (String) a[1])); + public static final ConstructingObjectParser<DataFrameTransform, Void> PARSER = new ConstructingObjectParser<>(NAME, true, + a -> new DataFrameTransform((String) a[0], (String) a[1], (String) a[2])); static { PARSER.declareString(ConstructingObjectParser.constructorArg(), DataFrameField.ID); PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), VERSION); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), FREQUENCY); } - private DataFrameTransform(String transformId, String version) { - this(transformId, version == null ?
null : Version.fromString(version)); + private DataFrameTransform(String transformId, String version, String frequency) { + this(transformId, version == null ? null : Version.fromString(version), + frequency == null ? null : TimeValue.parseTimeValue(frequency, FREQUENCY.getPreferredName())); } - public DataFrameTransform(String transformId, Version version) { + public DataFrameTransform(String transformId, Version version, TimeValue frequency) { this.transformId = transformId; this.version = version == null ? Version.V_7_2_0 : version; + this.frequency = frequency; } public DataFrameTransform(StreamInput in) throws IOException { @@ -52,6 +58,11 @@ public DataFrameTransform(StreamInput in) throws IOException { } else { this.version = Version.V_7_2_0; } + if (in.getVersion().onOrAfter(Version.V_7_3_0)) { + this.frequency = in.readOptionalTimeValue(); + } else { + this.frequency = null; + } } @Override @@ -70,6 +81,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_7_3_0)) { Version.writeVersion(version, out); } + if (out.getVersion().onOrAfter(Version.V_7_3_0)) { + out.writeOptionalTimeValue(frequency); + } } @Override @@ -77,6 +91,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); builder.field(DataFrameField.ID.getPreferredName(), transformId); builder.field(VERSION.getPreferredName(), version); + if (frequency != null) { + builder.field(FREQUENCY.getPreferredName(), frequency.getStringRep()); + } builder.endObject(); return builder; } @@ -89,6 +106,10 @@ public Version getVersion() { return version; } + public TimeValue getFrequency() { + return frequency; + } + public static DataFrameTransform fromXContent(XContentParser parser) throws IOException { return PARSER.parse(parser, null); } @@ -105,11 +126,13 @@ public boolean equals(Object other) { DataFrameTransform that = (DataFrameTransform) other; - return Objects.equals(this.transformId, that.transformId) && Objects.equals(this.version, that.version); + return Objects.equals(this.transformId, that.transformId) + && Objects.equals(this.version, that.version) + && Objects.equals(this.frequency, that.frequency); } @Override public int hashCode() { - return Objects.hash(transformId, version); + return Objects.hash(transformId, version, frequency); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpoint.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpoint.java index 8b1cf8b42e3d8..ec99827fdcdab 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpoint.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpoint.java @@ -279,18 +279,16 @@ public static long getBehind(DataFrameTransformCheckpoint oldCheckpoint, DataFra throw new IllegalArgumentException("old checkpoint is newer than new checkpoint"); } - // all old indices must be contained in the new ones but not vice versa - if (newCheckpoint.indicesCheckpoints.keySet().containsAll(oldCheckpoint.indicesCheckpoints.keySet()) == false) { - return -1L; - } - // get the sum of shard checkpoints // note: we require shard checkpoints to strictly increase and never decrease long oldCheckPointSum = 0; long newCheckPointSum = 0; - for (long[] v : oldCheckpoint.indicesCheckpoints.values()) { - oldCheckPointSum +=
Arrays.stream(v).sum(); + for (Entry entry : oldCheckpoint.indicesCheckpoints.entrySet()) { + // ignore entries that aren't part of newCheckpoint, e.g. deleted indices + if (newCheckpoint.indicesCheckpoints.containsKey(entry.getKey())) { + oldCheckPointSum += Arrays.stream(entry.getValue()).sum(); + } } for (long[] v : newCheckpoint.indicesCheckpoints.values()) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfig.java index e3ad50d9b889e..54d0ff7298383 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfig.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; @@ -56,6 +57,7 @@ public class DataFrameTransformConfig extends AbstractDiffable create SourceConfig source = (SourceConfig) args[1]; DestConfig dest = (DestConfig) args[2]; - SyncConfig syncConfig = (SyncConfig) args[3]; - // ignored, only for internal storage: String docType = (String) args[4]; + TimeValue frequency = + args[3] == null ? null : TimeValue.parseTimeValue((String) args[3], DataFrameField.FREQUENCY.getPreferredName()); + + SyncConfig syncConfig = (SyncConfig) args[4]; + // ignored, only for internal storage: String docType = (String) args[5]; // on strict parsing do not allow injection of headers, transform version, or create time if (lenient == false) { - validateStrictParsingParams(args[5], HEADERS.getPreferredName()); - validateStrictParsingParams(args[8], CREATE_TIME.getPreferredName()); - validateStrictParsingParams(args[9], VERSION.getPreferredName()); + validateStrictParsingParams(args[6], HEADERS.getPreferredName()); + validateStrictParsingParams(args[9], CREATE_TIME.getPreferredName()); + validateStrictParsingParams(args[10], VERSION.getPreferredName()); } @SuppressWarnings("unchecked") - Map headers = (Map) args[5]; + Map headers = (Map) args[6]; - PivotConfig pivotConfig = (PivotConfig) args[6]; - String description = (String)args[7]; + PivotConfig pivotConfig = (PivotConfig) args[7]; + String description = (String)args[8]; return new DataFrameTransformConfig(id, source, dest, + frequency, syncConfig, headers, pivotConfig, description, - (Instant)args[8], - (String)args[9]); + (Instant)args[9], + (String)args[10]); }); parser.declareString(optionalConstructorArg(), DataFrameField.ID); parser.declareObject(constructorArg(), (p, c) -> SourceConfig.fromXContent(p, lenient), DataFrameField.SOURCE); parser.declareObject(constructorArg(), (p, c) -> DestConfig.fromXContent(p, lenient), DataFrameField.DESTINATION); + parser.declareString(optionalConstructorArg(), DataFrameField.FREQUENCY); parser.declareObject(optionalConstructorArg(), (p, c) -> parseSyncConfig(p, lenient), DataFrameField.SYNC); @@ -146,6 +153,7 @@ public static String documentId(String transformId) { DataFrameTransformConfig(final String id, final SourceConfig source, final DestConfig dest, + final TimeValue frequency, final 
SyncConfig syncConfig, final Map headers, final PivotConfig pivotConfig, @@ -155,6 +163,7 @@ public static String documentId(String transformId) { this.id = ExceptionsHelper.requireNonNull(id, DataFrameField.ID.getPreferredName()); this.source = ExceptionsHelper.requireNonNull(source, DataFrameField.SOURCE.getPreferredName()); this.dest = ExceptionsHelper.requireNonNull(dest, DataFrameField.DESTINATION.getPreferredName()); + this.frequency = frequency; this.syncConfig = syncConfig; this.setHeaders(headers == null ? Collections.emptyMap() : headers); this.pivotConfig = pivotConfig; @@ -174,17 +183,23 @@ public static String documentId(String transformId) { public DataFrameTransformConfig(final String id, final SourceConfig source, final DestConfig dest, + final TimeValue frequency, final SyncConfig syncConfig, final Map headers, final PivotConfig pivotConfig, final String description) { - this(id, source, dest, syncConfig, headers, pivotConfig, description, null, null); + this(id, source, dest, frequency, syncConfig, headers, pivotConfig, description, null, null); } public DataFrameTransformConfig(final StreamInput in) throws IOException { id = in.readString(); source = new SourceConfig(in); dest = new DestConfig(in); + if (in.getVersion().onOrAfter(Version.V_7_3_0)) { + frequency = in.readOptionalTimeValue(); + } else { + frequency = null; + } setHeaders(in.readMap(StreamInput::readString, StreamInput::readString)); pivotConfig = in.readOptionalWriteable(PivotConfig::new); description = in.readOptionalString(); @@ -211,6 +226,10 @@ public DestConfig getDestination() { return dest; } + public TimeValue getFrequency() { + return frequency; + } + public SyncConfig getSyncConfig() { return syncConfig; } @@ -269,6 +288,9 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeString(id); source.writeTo(out); dest.writeTo(out); + if (out.getVersion().onOrAfter(Version.V_7_3_0)) { + out.writeOptionalTimeValue(frequency); + } out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); out.writeOptionalWriteable(pivotConfig); out.writeOptionalString(description); @@ -290,6 +312,9 @@ public XContentBuilder toXContent(final XContentBuilder builder, final Params pa builder.field(DataFrameField.ID.getPreferredName(), id); builder.field(DataFrameField.SOURCE.getPreferredName(), source); builder.field(DataFrameField.DESTINATION.getPreferredName(), dest); + if (frequency != null) { + builder.field(DataFrameField.FREQUENCY.getPreferredName(), frequency.getStringRep()); + } if (syncConfig != null) { builder.startObject(DataFrameField.SYNC.getPreferredName()); builder.field(syncConfig.getWriteableName(), syncConfig); @@ -332,6 +357,7 @@ public boolean equals(Object other) { return Objects.equals(this.id, that.id) && Objects.equals(this.source, that.source) && Objects.equals(this.dest, that.dest) + && Objects.equals(this.frequency, that.frequency) && Objects.equals(this.syncConfig, that.syncConfig) && Objects.equals(this.headers, that.headers) && Objects.equals(this.pivotConfig, that.pivotConfig) @@ -342,7 +368,7 @@ public boolean equals(Object other) { @Override public int hashCode(){ - return Objects.hash(id, source, dest, syncConfig, headers, pivotConfig, description, createTime, transformVersion); + return Objects.hash(id, source, dest, frequency, syncConfig, headers, pivotConfig, description, createTime, transformVersion); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformState.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformState.java index 2c3ad36d6849a..f942f0dd2a9dc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformState.java @@ -22,8 +22,6 @@ import org.elasticsearch.xpack.core.indexing.IndexerState; import java.io.IOException; -import java.util.Collections; -import java.util.LinkedHashMap; import java.util.Map; import java.util.Objects; @@ -39,7 +37,7 @@ public class DataFrameTransformState implements Task.Status, PersistentTaskState private final long checkpoint; @Nullable - private final Map currentPosition; + private final DataFrameIndexerPosition position; @Nullable private final String reason; @Nullable @@ -47,7 +45,10 @@ public class DataFrameTransformState implements Task.Status, PersistentTaskState public static final ParseField TASK_STATE = new ParseField("task_state"); public static final ParseField INDEXER_STATE = new ParseField("indexer_state"); + + // 7.3 BWC: current_position only exists in 7.2. In 7.3+ it is replaced by position. public static final ParseField CURRENT_POSITION = new ParseField("current_position"); + public static final ParseField POSITION = new ParseField("position"); public static final ParseField CHECKPOINT = new ParseField("checkpoint"); public static final ParseField REASON = new ParseField("reason"); public static final ParseField PROGRESS = new ParseField("progress"); @@ -56,18 +57,30 @@ public class DataFrameTransformState implements Task.Status, PersistentTaskState @SuppressWarnings("unchecked") public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, true, - args -> new DataFrameTransformState((DataFrameTransformTaskState) args[0], - (IndexerState) args[1], - (Map) args[2], - (long) args[3], - (String) args[4], - (DataFrameTransformProgress) args[5], - (NodeAttributes) args[6])); + args -> { + DataFrameTransformTaskState taskState = (DataFrameTransformTaskState) args[0]; + IndexerState indexerState = (IndexerState) args[1]; + Map bwcCurrentPosition = (Map) args[2]; + DataFrameIndexerPosition dataFrameIndexerPosition = (DataFrameIndexerPosition) args[3]; + + // BWC handling, translate current_position to position iff position isn't set + if (bwcCurrentPosition != null && dataFrameIndexerPosition == null) { + dataFrameIndexerPosition = new DataFrameIndexerPosition(bwcCurrentPosition, null); + } + + long checkpoint = (long) args[4]; + String reason = (String) args[5]; + DataFrameTransformProgress progress = (DataFrameTransformProgress) args[6]; + NodeAttributes node = (NodeAttributes) args[7]; + + return new DataFrameTransformState(taskState, indexerState, dataFrameIndexerPosition, checkpoint, reason, progress, node); + }); static { PARSER.declareField(constructorArg(), p -> DataFrameTransformTaskState.fromString(p.text()), TASK_STATE, ValueType.STRING); PARSER.declareField(constructorArg(), p -> IndexerState.fromString(p.text()), INDEXER_STATE, ValueType.STRING); PARSER.declareField(optionalConstructorArg(), XContentParser::mapOrdered, CURRENT_POSITION, ValueType.OBJECT); + PARSER.declareField(optionalConstructorArg(), DataFrameIndexerPosition::fromXContent, POSITION, ValueType.OBJECT); PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), CHECKPOINT); PARSER.declareString(optionalConstructorArg(), REASON); 
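// Editorial sketch, not part of this diff: with the BWC shim above, a task-state document written by a 7.2
// node, e.g. {"task_state":"started","indexer_state":"started","current_position":{"key":"value"},"checkpoint":1},
// parses so that getPosition() returns a DataFrameIndexerPosition holding {"key":"value"} as the indexer
// position and null buckets, because current_position is translated into position only when position itself
// is absent; a 7.3+ document that carries "position" takes precedence.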
PARSER.declareField(optionalConstructorArg(), DataFrameTransformProgress.PARSER::apply, PROGRESS, ValueType.OBJECT); @@ -76,14 +89,14 @@ public class DataFrameTransformState implements Task.Status, PersistentTaskState public DataFrameTransformState(DataFrameTransformTaskState taskState, IndexerState indexerState, - @Nullable Map position, + @Nullable DataFrameIndexerPosition position, long checkpoint, @Nullable String reason, @Nullable DataFrameTransformProgress progress, @Nullable NodeAttributes node) { this.taskState = taskState; this.indexerState = indexerState; - this.currentPosition = position == null ? null : Collections.unmodifiableMap(new LinkedHashMap<>(position)); + this.position = position; this.checkpoint = checkpoint; this.reason = reason; this.progress = progress; @@ -92,7 +105,7 @@ public DataFrameTransformState(DataFrameTransformTaskState taskState, public DataFrameTransformState(DataFrameTransformTaskState taskState, IndexerState indexerState, - @Nullable Map position, + @Nullable DataFrameIndexerPosition position, long checkpoint, @Nullable String reason, @Nullable DataFrameTransformProgress progress) { @@ -102,8 +115,12 @@ public DataFrameTransformState(DataFrameTransformTaskState taskState, public DataFrameTransformState(StreamInput in) throws IOException { taskState = DataFrameTransformTaskState.fromStream(in); indexerState = IndexerState.fromStream(in); - Map position = in.readMap(); - currentPosition = position == null ? null : Collections.unmodifiableMap(position); + if (in.getVersion().onOrAfter(Version.V_7_3_0)) { + position = in.readOptionalWriteable(DataFrameIndexerPosition::new); + } else { + Map pos = in.readMap(); + position = new DataFrameIndexerPosition(pos, null); + } checkpoint = in.readLong(); reason = in.readOptionalString(); progress = in.readOptionalWriteable(DataFrameTransformProgress::new); @@ -122,8 +139,8 @@ public IndexerState getIndexerState() { return indexerState; } - public Map getPosition() { - return currentPosition; + public DataFrameIndexerPosition getPosition() { + return position; } public long getCheckpoint() { @@ -169,8 +186,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); builder.field(TASK_STATE.getPreferredName(), taskState.value()); builder.field(INDEXER_STATE.getPreferredName(), indexerState.value()); - if (currentPosition != null) { - builder.field(CURRENT_POSITION.getPreferredName(), currentPosition); + if (position != null) { + builder.field(POSITION.getPreferredName(), position); } builder.field(CHECKPOINT.getPreferredName(), checkpoint); if (reason != null) { @@ -195,7 +212,11 @@ public String getWriteableName() { public void writeTo(StreamOutput out) throws IOException { taskState.writeTo(out); indexerState.writeTo(out); - out.writeMap(currentPosition); + if (out.getVersion().onOrAfter(Version.V_7_3_0)) { + out.writeOptionalWriteable(position); + } else { + out.writeMap(position != null ? 
position.getIndexerPosition() : null); + } out.writeLong(checkpoint); out.writeOptionalString(reason); out.writeOptionalWriteable(progress); @@ -218,7 +239,7 @@ public boolean equals(Object other) { return Objects.equals(this.taskState, that.taskState) && Objects.equals(this.indexerState, that.indexerState) && - Objects.equals(this.currentPosition, that.currentPosition) && + Objects.equals(this.position, that.position) && this.checkpoint == that.checkpoint && Objects.equals(this.reason, that.reason) && Objects.equals(this.progress, that.progress) && @@ -227,7 +248,7 @@ public boolean equals(Object other) { @Override public int hashCode() { - return Objects.hash(taskState, indexerState, currentPosition, checkpoint, reason, progress, node); + return Objects.hash(taskState, indexerState, position, checkpoint, reason, progress, node); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/TimeSyncConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/TimeSyncConfig.java index 0490394d90b26..607d48b741fce 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/TimeSyncConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/TimeSyncConfig.java @@ -27,6 +27,7 @@ public class TimeSyncConfig implements SyncConfig { + public static final TimeValue DEFAULT_DELAY = TimeValue.timeValueSeconds(60); private static final String NAME = "data_frame_transform_pivot_sync_time"; private final String field; @@ -37,20 +38,18 @@ public class TimeSyncConfig implements SyncConfig { private static ConstructingObjectParser createParser(boolean lenient) { ConstructingObjectParser parser = new ConstructingObjectParser<>(NAME, lenient, - args -> { - String field = (String) args[0]; - TimeValue delay = args[1] != null ? (TimeValue) args[1] : TimeValue.ZERO; - - return new TimeSyncConfig(field, delay); - }); - + args -> { + String field = (String) args[0]; + TimeValue delay = (TimeValue) args[1]; + return new TimeSyncConfig(field, delay); + }); parser.declareString(constructorArg(), DataFrameField.FIELD); parser.declareField(optionalConstructorArg(), - (p, c) -> TimeValue.parseTimeValue(p.textOrNull(), DataFrameField.DELAY.getPreferredName()), DataFrameField.DELAY, - ObjectParser.ValueType.STRING_OR_NULL); - - return parser; - } + (p, c) -> TimeValue.parseTimeValue(p.text(), DEFAULT_DELAY, DataFrameField.DELAY.getPreferredName()), + DataFrameField.DELAY, + ObjectParser.ValueType.STRING); + return parser; + } public TimeSyncConfig() { this(null, null); @@ -58,7 +57,7 @@ public TimeSyncConfig() { public TimeSyncConfig(final String field, final TimeValue delay) { this.field = ExceptionsHelper.requireNonNull(field, DataFrameField.FIELD.getPreferredName()); - this.delay = ExceptionsHelper.requireNonNull(delay, DataFrameField.DELAY.getPreferredName()); + this.delay = delay == null ? 
DEFAULT_DELAY : delay; } public TimeSyncConfig(StreamInput in) throws IOException { @@ -89,9 +88,7 @@ public void writeTo(final StreamOutput out) throws IOException { public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { builder.startObject(); builder.field(DataFrameField.FIELD.getPreferredName(), field); - if (delay.duration() > 0) { - builder.field(DataFrameField.DELAY.getPreferredName(), delay.getStringRep()); - } + builder.field(DataFrameField.DELAY.getPreferredName(), delay.getStringRep()); builder.endObject(); return builder; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoAction.java index 4741a26de352e..72d3c402e3b1b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoAction.java @@ -8,7 +8,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.IndicesRequest; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; @@ -39,13 +39,13 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; -public class DeprecationInfoAction extends StreamableResponseActionType { +public class DeprecationInfoAction extends ActionType { public static final DeprecationInfoAction INSTANCE = new DeprecationInfoAction(); public static final String NAME = "cluster:admin/xpack/deprecation/info"; private DeprecationInfoAction() { - super(NAME); + super(NAME, DeprecationInfoAction.Response::new); } /** @@ -77,18 +77,18 @@ private static List mergeNodeIssues(NodesDeprecationCheckRespo }).collect(Collectors.toList()); } - @Override - public Response newResponse() { - return new Response(); - } - public static class Response extends ActionResponse implements ToXContentObject { private List clusterSettingsIssues; private List nodeSettingsIssues; private Map> indexSettingsIssues; private List mlSettingsIssues; - public Response() { + public Response(StreamInput in) throws IOException { + super(in); + clusterSettingsIssues = in.readList(DeprecationIssue::new); + nodeSettingsIssues = in.readList(DeprecationIssue::new); + indexSettingsIssues = in.readMapOfLists(StreamInput::readString, DeprecationIssue::new); + mlSettingsIssues = in.readList(DeprecationIssue::new); } public Response(List clusterSettingsIssues, @@ -117,15 +117,6 @@ public List getMlSettingsIssues() { return mlSettingsIssues; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - clusterSettingsIssues = in.readList(DeprecationIssue::new); - nodeSettingsIssues = in.readList(DeprecationIssue::new); - indexSettingsIssues = in.readMapOfLists(StreamInput::readString, DeprecationIssue::new); - mlSettingsIssues = in.readList(DeprecationIssue::new); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeList(clusterSettingsIssues); @@ -262,11 +253,6 @@ public ActionRequestValidationException validate() { return validationException; } - @Override - public 
void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/NodesDeprecationCheckAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/NodesDeprecationCheckAction.java index b650da55fc0b0..9991abe0f2e63 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/NodesDeprecationCheckAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/NodesDeprecationCheckAction.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.core.deprecation; import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.StreamableResponseActionType; import org.elasticsearch.action.support.nodes.BaseNodeRequest; import org.elasticsearch.action.support.nodes.BaseNodeResponse; import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder; @@ -24,35 +23,26 @@ * Runs deprecation checks on each node. Deprecation checks are performed locally so that filtered settings * can be accessed in the deprecation checks. */ -public class NodesDeprecationCheckAction extends StreamableResponseActionType { +public class NodesDeprecationCheckAction extends ActionType { public static final NodesDeprecationCheckAction INSTANCE = new NodesDeprecationCheckAction(); public static final String NAME = "cluster:admin/xpack/deprecation/nodes/info"; private NodesDeprecationCheckAction() { - super(NAME); - } - - @Override - public NodesDeprecationCheckResponse newResponse() { - return new NodesDeprecationCheckResponse(); + super(NAME, NodesDeprecationCheckResponse::new); } public static class NodeRequest extends BaseNodeRequest { NodesDeprecationCheckRequest request; - public NodeRequest() {} + public NodeRequest(StreamInput in) throws IOException { + super(in); + request = new NodesDeprecationCheckRequest(in); + } public NodeRequest(NodesDeprecationCheckRequest request) { this.request = request; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - request = new NodesDeprecationCheckRequest(); - request.readFrom(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -63,8 +53,9 @@ public void writeTo(StreamOutput out) throws IOException { public static class NodeResponse extends BaseNodeResponse { private List deprecationIssues; - public NodeResponse() { - super(); + public NodeResponse(StreamInput in) throws IOException { + super(in); + deprecationIssues = in.readList(DeprecationIssue::new); } public NodeResponse(DiscoveryNode node, List deprecationIssues) { @@ -72,24 +63,12 @@ public NodeResponse(DiscoveryNode node, List deprecationIssues this.deprecationIssues = deprecationIssues; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - deprecationIssues = in.readList(DeprecationIssue::new); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeList(this.deprecationIssues); } - public static NodeResponse readNodeResponse(StreamInput in) throws IOException { - NodeResponse nodeResponse = new NodeResponse(); - nodeResponse.readFrom(in); - return nodeResponse; - } - public List getDeprecationIssues() { return deprecationIssues; } diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/NodesDeprecationCheckRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/NodesDeprecationCheckRequest.java index af7b2da6f55eb..2805047ee965c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/NodesDeprecationCheckRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/NodesDeprecationCheckRequest.java @@ -15,17 +15,14 @@ import java.util.Objects; public class NodesDeprecationCheckRequest extends BaseNodesRequest { - public NodesDeprecationCheckRequest() {} + public NodesDeprecationCheckRequest(StreamInput in) throws IOException { + super(in); + } public NodesDeprecationCheckRequest(String... nodesIds) { super(nodesIds); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/NodesDeprecationCheckResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/NodesDeprecationCheckResponse.java index db7dbc6a381e2..4a7ae1fc775a0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/NodesDeprecationCheckResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/NodesDeprecationCheckResponse.java @@ -18,7 +18,9 @@ public class NodesDeprecationCheckResponse extends BaseNodesResponse { - public NodesDeprecationCheckResponse() {} + public NodesDeprecationCheckResponse(StreamInput in) throws IOException { + super(in); + } public NodesDeprecationCheckResponse(ClusterName clusterName, List nodes, @@ -28,12 +30,12 @@ public NodesDeprecationCheckResponse(ClusterName clusterName, @Override protected List readNodesFrom(StreamInput in) throws IOException { - return in.readList(NodesDeprecationCheckAction.NodeResponse::readNodeResponse); + return in.readList(NodesDeprecationCheckAction.NodeResponse::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeStreamableList(nodes); + out.writeList(nodes); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/frozen/FrozenIndicesFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/frozen/FrozenIndicesFeatureSetUsage.java new file mode 100644 index 0000000000000..cc556330497f3 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/frozen/FrozenIndicesFeatureSetUsage.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
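The deprecation request and response classes above all receive the same mechanical migration away from Streamable: the no-arg constructor plus readFrom(StreamInput) pair becomes a single constructor taking the StreamInput, so deserialization happens in one step and fields can be final. The shape of the pattern on a hypothetical node response (not a class from this change):

import java.io.IOException;
import java.util.List;

import org.elasticsearch.action.support.nodes.BaseNodeResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.xpack.core.deprecation.DeprecationIssue;

public class ExampleNodeResponse extends BaseNodeResponse {

    private final List<DeprecationIssue> issues; // final: set once in the reading constructor

    public ExampleNodeResponse(StreamInput in) throws IOException {
        super(in); // replaces the old super.readFrom(in)
        issues = in.readList(DeprecationIssue::new);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeList(issues);
    }
}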
+ */ +package org.elasticsearch.xpack.core.frozen; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackField; + +import java.io.IOException; +import java.util.Objects; + +public class FrozenIndicesFeatureSetUsage extends XPackFeatureSet.Usage { + + private final int numberOfFrozenIndices; + + public FrozenIndicesFeatureSetUsage(StreamInput input) throws IOException { + super(input); + numberOfFrozenIndices = input.readVInt(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVInt(numberOfFrozenIndices); + } + + public FrozenIndicesFeatureSetUsage(boolean available, boolean enabled, int numberOfFrozenIndices) { + super(XPackField.FROZEN_INDICES, available, enabled); + this.numberOfFrozenIndices = numberOfFrozenIndices; + } + + @Override + protected void innerXContent(XContentBuilder builder, Params params) throws IOException { + super.innerXContent(builder, params); + builder.field("indices_count", numberOfFrozenIndices); + } + + public int getNumberOfFrozenIndices() { + return numberOfFrozenIndices; + } + + @Override + public int hashCode() { + return Objects.hash(available, enabled, numberOfFrozenIndices); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + FrozenIndicesFeatureSetUsage other = (FrozenIndicesFeatureSetUsage) obj; + return Objects.equals(available, other.available) && + Objects.equals(enabled, other.enabled) && + Objects.equals(numberOfFrozenIndices, other.numberOfFrozenIndices); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/frozen/action/FreezeIndexAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/frozen/action/FreezeIndexAction.java new file mode 100644 index 0000000000000..aefbe670d6327 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/frozen/action/FreezeIndexAction.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
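FreezeIndexAction below, like GraphExploreAction and the ILM and ML actions later in this change, registers its response reader directly through the ActionType constructor; the StreamableResponseActionType base class and the newResponse()/getResponseReader() overrides disappear. A minimal hypothetical action showing the shape (FooAction and its response are stand-ins, not real classes):

import java.io.IOException;

import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.ActionType;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

public class FooAction extends ActionType<FooAction.Response> {

    public static final FooAction INSTANCE = new FooAction();
    public static final String NAME = "cluster:admin/xpack/foo";

    private FooAction() {
        super(NAME, FooAction.Response::new); // the reader replaces newResponse()
    }

    public static class Response extends ActionResponse {

        public Response(StreamInput in) throws IOException {
            super(in);
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            // nothing beyond the base class in this sketch
        }
    }
}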
+ */ +package org.elasticsearch.xpack.core.frozen.action; + +import org.elasticsearch.action.ActionType; +import org.elasticsearch.protocol.xpack.frozen.FreezeResponse; + +public class FreezeIndexAction extends ActionType { + + public static final FreezeIndexAction INSTANCE = new FreezeIndexAction(); + public static final String NAME = "indices:admin/freeze"; + + private FreezeIndexAction() { + super(NAME, FreezeResponse::new); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreAction.java index 4b1cebedd664e..58814604243e4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreAction.java @@ -5,20 +5,15 @@ */ package org.elasticsearch.xpack.core.graph.action; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse; -public class GraphExploreAction extends StreamableResponseActionType { +public class GraphExploreAction extends ActionType { public static final GraphExploreAction INSTANCE = new GraphExploreAction(); public static final String NAME = "indices:data/read/xpack/graph/explore"; private GraphExploreAction() { - super(NAME); + super(NAME, GraphExploreResponse::new); } - - @Override - public GraphExploreResponse newResponse() { - return new GraphExploreResponse(); - } -} \ No newline at end of file +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java index efe57f44e89dc..bd159e6fc3368 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java @@ -349,31 +349,44 @@ private void onSearchResponse(SearchResponse searchResponse) { } final List docs = iterationResult.getToIndex(); - final BulkRequest bulkRequest = new BulkRequest(); - docs.forEach(bulkRequest::add); - - // TODO this might be a valid case, e.g. if implementation filters - assert bulkRequest.requests().size() > 0; - - stats.markStartIndexing(); - doNextBulk(bulkRequest, ActionListener.wrap(bulkResponse -> { - // TODO we should check items in the response and move after accordingly to - // resume the failing buckets ? - if (bulkResponse.hasFailures()) { - logger.warn("Error while attempting to bulk index documents: " + bulkResponse.buildFailureMessage()); - } - stats.incrementNumOutputDocuments(bulkResponse.getItems().length); - - // check if indexer has been asked to stop, state {@link IndexerState#STOPPING} - if (checkState(getState()) == false) { - return; - } - JobPosition newPosition = iterationResult.getPosition(); - position.set(newPosition); + // an iteration result might return an empty set of documents to be indexed + if (docs.isEmpty() == false) { + final BulkRequest bulkRequest = new BulkRequest(); + docs.forEach(bulkRequest::add); + + stats.markStartIndexing(); + doNextBulk(bulkRequest, ActionListener.wrap(bulkResponse -> { + // TODO we should check items in the response and move after accordingly to + // resume the failing buckets ? 
+ if (bulkResponse.hasFailures()) { + logger.warn("Error while attempting to bulk index documents: " + bulkResponse.buildFailureMessage()); + } + stats.incrementNumOutputDocuments(bulkResponse.getItems().length); + + // check if indexer has been asked to stop, state {@link IndexerState#STOPPING} + if (checkState(getState()) == false) { + return; + } + + JobPosition newPosition = iterationResult.getPosition(); + position.set(newPosition); + + onBulkResponse(bulkResponse, newPosition); + }, this::finishWithIndexingFailure)); + } else { + // no documents need to be indexed, continue with search + try { + JobPosition newPosition = iterationResult.getPosition(); + position.set(newPosition); - onBulkResponse(bulkResponse, newPosition); - }, this::finishWithIndexingFailure)); + ActionListener listener = ActionListener.wrap(this::onSearchResponse, this::finishWithSearchFailure); + nextSearch(listener); + } catch (Exception e) { + finishAndSetState(); + onFailure(e); + } + } } catch (Exception e) { finishWithSearchFailure(e); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleResponse.java index 915ca17cb43a4..71d8f8f77eed6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleResponse.java @@ -49,7 +49,15 @@ public static ExplainLifecycleResponse fromXContent(XContentParser parser) { return PARSER.apply(parser, null); } - public ExplainLifecycleResponse() { + public ExplainLifecycleResponse(StreamInput in) throws IOException { + super(in); + int size = in.readVInt(); + Map indexResponses = new HashMap<>(size); + for (int i = 0; i < size; i++) { + IndexLifecycleExplainResponse indexResponse = new IndexLifecycleExplainResponse(in); + indexResponses.put(indexResponse.getIndex(), indexResponse); + } + this.indexResponses = indexResponses; } public ExplainLifecycleResponse(Map indexResponses) { @@ -78,17 +86,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - @Override - public void readFrom(StreamInput in) throws IOException { - int size = in.readVInt(); - Map indexResponses = new HashMap<>(size); - for (int i = 0; i < size; i++) { - IndexLifecycleExplainResponse indexResponse = new IndexLifecycleExplainResponse(in); - indexResponses.put(indexResponse.getIndex(), indexResponse); - } - this.indexResponses = indexResponses; - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(indexResponses.size()); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/FreezeStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/FreezeStep.java index ae7b0af6222dc..f5e44a863cbf1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/FreezeStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/FreezeStep.java @@ -9,7 +9,8 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.xpack.core.action.TransportFreezeIndexAction; +import org.elasticsearch.protocol.xpack.frozen.FreezeRequest; +import org.elasticsearch.xpack.core.frozen.action.FreezeIndexAction; 
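The AsyncTwoPhaseIndexer change above replaces the old assertion that every iteration produces documents: when an implementation filters everything out, the indexer now advances its position and issues the next search directly instead of sending an empty bulk request. A self-contained sketch of that control flow (names simplified; not the real indexer API):

import java.util.List;
import java.util.concurrent.atomic.AtomicReference;

class IndexerLoopSketch<P> {

    private final AtomicReference<P> position = new AtomicReference<>();

    void onIterationResult(List<String> docs, P newPosition) {
        if (docs.isEmpty() == false) {
            bulkIndex(docs); // position is advanced in the bulk-response callback
        } else {
            position.set(newPosition); // nothing to index: advance immediately...
            nextSearch();              // ...and continue with the next search
        }
    }

    void bulkIndex(List<String> docs) { /* send bulk, then advance position and search again */ }

    void nextSearch() { /* issue the next search request */ }
}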
/** * Freezes an index. @@ -23,8 +24,8 @@ public FreezeStep(StepKey key, StepKey nextStepKey, Client client) { @Override public void performDuringNoSnapshot(IndexMetaData indexMetaData, ClusterState currentState, Listener listener) { - getClient().admin().indices().execute(TransportFreezeIndexAction.FreezeIndexAction.INSTANCE, - new TransportFreezeIndexAction.FreezeRequest(indexMetaData.getIndex().getName()), + getClient().admin().indices().execute(FreezeIndexAction.INSTANCE, + new FreezeRequest(indexMetaData.getIndex().getName()), ActionListener.wrap(response -> listener.onResponse(true), listener::onFailure)); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleExplainResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleExplainResponse.java index fd171c88539c3..9326325054fe2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleExplainResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleExplainResponse.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -42,6 +43,7 @@ public class IndexLifecycleExplainResponse implements ToXContentObject, Writeabl private static final ParseField STEP_TIME_FIELD = new ParseField("step_time"); private static final ParseField STEP_INFO_FIELD = new ParseField("step_info"); private static final ParseField PHASE_EXECUTION_INFO = new ParseField("phase_execution"); + private static final ParseField AGE_FIELD = new ParseField("age"); public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "index_lifecycle_explain_response", @@ -58,7 +60,9 @@ public class IndexLifecycleExplainResponse implements ToXContentObject, Writeabl (Long) (a[9]), (Long) (a[10]), (BytesReference) a[11], - (PhaseExecutionInfo) a[12])); + (PhaseExecutionInfo) a[12] + // a[13] == "age" + )); static { PARSER.declareString(ConstructingObjectParser.constructorArg(), INDEX_FIELD); PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), MANAGED_BY_ILM_FIELD); @@ -78,6 +82,7 @@ public class IndexLifecycleExplainResponse implements ToXContentObject, Writeabl }, STEP_INFO_FIELD); PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> PhaseExecutionInfo.parse(p, ""), PHASE_EXECUTION_INFO); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), AGE_FIELD); } private final String index; @@ -243,6 +248,14 @@ public PhaseExecutionInfo getPhaseExecutionInfo() { return phaseExecutionInfo; } + public TimeValue getAge() { + if (lifecycleDate == null) { + return TimeValue.MINUS_ONE; + } else { + return TimeValue.timeValueMillis(System.currentTimeMillis() - lifecycleDate); + } + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -252,6 +265,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(POLICY_NAME_FIELD.getPreferredName(), policyName); if (lifecycleDate != null) { 
builder.timeField(LIFECYCLE_DATE_MILLIS_FIELD.getPreferredName(), LIFECYCLE_DATE_FIELD.getPreferredName(), lifecycleDate); + builder.field(AGE_FIELD.getPreferredName(), getAge().toHumanReadableString(2)); } if (phase != null) { builder.field(PHASE_FIELD.getPreferredName(), phase); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleSettings.java index 3cfd8556244a9..0a157b8197a10 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleSettings.java @@ -16,10 +16,15 @@ public class LifecycleSettings { public static final String LIFECYCLE_NAME = "index.lifecycle.name"; public static final String LIFECYCLE_INDEXING_COMPLETE = "index.lifecycle.indexing_complete"; + public static final String SLM_HISTORY_INDEX_ENABLED = "slm.history_index_enabled"; + public static final Setting LIFECYCLE_POLL_INTERVAL_SETTING = Setting.timeSetting(LIFECYCLE_POLL_INTERVAL, TimeValue.timeValueMinutes(10), TimeValue.timeValueSeconds(1), Setting.Property.Dynamic, Setting.Property.NodeScope); public static final Setting LIFECYCLE_NAME_SETTING = Setting.simpleString(LIFECYCLE_NAME, Setting.Property.Dynamic, Setting.Property.IndexScope); public static final Setting LIFECYCLE_INDEXING_COMPLETE_SETTING = Setting.boolSetting(LIFECYCLE_INDEXING_COMPLETE, false, Setting.Property.Dynamic, Setting.Property.IndexScope); + + public static final Setting SLM_HISTORY_INDEX_ENABLED_SETTING = Setting.boolSetting(SLM_HISTORY_INDEX_ENABLED, true, + Setting.Property.NodeScope); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/StartILMRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/StartILMRequest.java index de38a5e092ae2..5f54686805b1e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/StartILMRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/StartILMRequest.java @@ -8,9 +8,17 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; public class StartILMRequest extends AcknowledgedRequest { + public StartILMRequest(StreamInput in) throws IOException { + super(in); + + } + public StartILMRequest() { } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/StopILMRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/StopILMRequest.java index 3a2d458406b30..14d07150f614d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/StopILMRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/StopILMRequest.java @@ -8,9 +8,17 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; public class StopILMRequest extends AcknowledgedRequest { + public StopILMRequest(StreamInput in) throws IOException { + super(in); + + } + public StopILMRequest() { } diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForRolloverReadyStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForRolloverReadyStep.java index 46504dd305d7d..a845c2a5ff479 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForRolloverReadyStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForRolloverReadyStep.java @@ -98,7 +98,7 @@ public void evaluateCondition(IndexMetaData indexMetaData, Listener listener) { // Similarly, if isWriteIndex is false (see note above on false vs. null), we can't roll over this index, so error out. if (Boolean.FALSE.equals(isWriteIndex)) { listener.onFailure(new IllegalArgumentException(String.format(Locale.ROOT, - "index [%s] is not the write index for alias [%s]", rolloverAlias, indexMetaData.getIndex().getName()))); + "index [%s] is not the write index for alias [%s]", indexMetaData.getIndex().getName(), rolloverAlias))); } RolloverRequest rolloverRequest = new RolloverRequest(rolloverAlias, null); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/DeleteLifecycleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/DeleteLifecycleAction.java index f9f5893bcb0cc..943a711cbc490 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/DeleteLifecycleAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/DeleteLifecycleAction.java @@ -6,14 +6,13 @@ package org.elasticsearch.xpack.core.indexlifecycle.action; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentObject; import java.io.IOException; @@ -24,12 +23,7 @@ public class DeleteLifecycleAction extends ActionType getResponseReader() { - return Response::new; + super(NAME, DeleteLifecycleAction.Response::new); } public static class Response extends AcknowledgedResponse implements ToXContentObject { @@ -53,6 +47,11 @@ public Request(String policyName) { this.policyName = policyName; } + public Request(StreamInput in) throws IOException { + super(in); + policyName = in.readString(); + } + public Request() { } @@ -65,12 +64,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - policyName = in.readString(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/ExplainLifecycleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/ExplainLifecycleAction.java index bb53f42040cc3..7df2e04310afa 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/ExplainLifecycleAction.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/ExplainLifecycleAction.java @@ -6,20 +6,15 @@ package org.elasticsearch.xpack.core.indexlifecycle.action; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.xpack.core.indexlifecycle.ExplainLifecycleResponse; -public class ExplainLifecycleAction extends StreamableResponseActionType { +public class ExplainLifecycleAction extends ActionType { public static final ExplainLifecycleAction INSTANCE = new ExplainLifecycleAction(); public static final String NAME = "indices:admin/ilm/explain"; protected ExplainLifecycleAction() { - super(NAME); - } - - @Override - public ExplainLifecycleResponse newResponse() { - return new ExplainLifecycleResponse(); + super(NAME, ExplainLifecycleResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleAction.java index b34889f5f1351..ba13027dcb049 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleAction.java @@ -8,7 +8,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -23,24 +23,21 @@ import java.util.List; import java.util.Objects; -public class GetLifecycleAction extends StreamableResponseActionType { +public class GetLifecycleAction extends ActionType { public static final GetLifecycleAction INSTANCE = new GetLifecycleAction(); public static final String NAME = "cluster:admin/ilm/get"; protected GetLifecycleAction() { - super(NAME); - } - - @Override - public Response newResponse() { - return new Response(); + super(NAME, GetLifecycleAction.Response::new); } public static class Response extends ActionResponse implements ToXContentObject { private List policies; - public Response() { + public Response(StreamInput in) throws IOException { + super(in); + this.policies = in.readList(LifecyclePolicyResponseItem::new); } public Response(List policies) { @@ -65,11 +62,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - @Override - public void readFrom(StreamInput in) throws IOException { - this.policies = in.readList(LifecyclePolicyResponseItem::new); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeList(policies); @@ -109,6 +101,11 @@ public Request(String... 
policyNames) { this.policyNames = policyNames; } + public Request(StreamInput in) throws IOException { + super(in); + policyNames = in.readStringArray(); + } + public Request() { policyNames = Strings.EMPTY_ARRAY; } @@ -122,12 +119,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - policyNames = in.readStringArray(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetStatusAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetStatusAction.java index eaa3e9a468faf..74e3128548def 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetStatusAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetStatusAction.java @@ -8,7 +8,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -20,24 +20,21 @@ import java.io.IOException; import java.util.Objects; -public class GetStatusAction extends StreamableResponseActionType { +public class GetStatusAction extends ActionType { public static final GetStatusAction INSTANCE = new GetStatusAction(); public static final String NAME = "cluster:admin/ilm/operation_mode/get"; protected GetStatusAction() { - super(NAME); - } - - @Override - public Response newResponse() { - return new Response(); + super(NAME, GetStatusAction.Response::new); } public static class Response extends ActionResponse implements ToXContentObject { private OperationMode mode; - public Response() { + public Response(StreamInput in) throws IOException { + super(in); + mode = in.readEnum(OperationMode.class); } public Response(OperationMode mode) { @@ -56,11 +53,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - @Override - public void readFrom(StreamInput in) throws IOException { - mode = in.readEnum(OperationMode.class); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeEnum(mode); @@ -92,6 +84,10 @@ public String toString() { public static class Request extends AcknowledgedRequest { + public Request(StreamInput in) throws IOException { + super(in); + } + public Request() { } @@ -100,11 +96,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/MoveToStepAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/MoveToStepAction.java index 8e2f04c66fc24..26417e809a44f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/MoveToStepAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/MoveToStepAction.java @@ -6,15 +6,14 @@ */ package 
org.elasticsearch.xpack.core.indexlifecycle.action; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -29,12 +28,7 @@ public class MoveToStepAction extends ActionType { public static final String NAME = "cluster:admin/ilm/_move/post"; protected MoveToStepAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return Response::new; + super(NAME, MoveToStepAction.Response::new); } public static class Response extends AcknowledgedResponse implements ToXContentObject { @@ -73,6 +67,13 @@ public Request(String index, StepKey currentStepKey, StepKey nextStepKey) { this.nextStepKey = nextStepKey; } + public Request(StreamInput in) throws IOException { + super(in); + this.index = in.readString(); + this.currentStepKey = new StepKey(in); + this.nextStepKey = new StepKey(in); + } + public Request() { } @@ -97,14 +98,6 @@ public static Request parseRequest(String name, XContentParser parser) { return PARSER.apply(parser, name); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - this.index = in.readString(); - this.currentStepKey = new StepKey(in); - this.nextStepKey = new StepKey(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/PutLifecycleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/PutLifecycleAction.java index df46356ac20e2..d557ca1573d9c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/PutLifecycleAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/PutLifecycleAction.java @@ -5,15 +5,14 @@ */ package org.elasticsearch.xpack.core.indexlifecycle.action; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -28,12 +27,7 @@ public class PutLifecycleAction extends ActionType public static final String NAME = "cluster:admin/ilm/put"; protected PutLifecycleAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return Response::new; + super(NAME, 
PutLifecycleAction.Response::new); } public static class Response extends AcknowledgedResponse implements ToXContentObject { @@ -62,6 +56,11 @@ public Request(LifecyclePolicy policy) { this.policy = policy; } + public Request(StreamInput in) throws IOException { + super(in); + policy = new LifecyclePolicy(in); + } + public Request() { } @@ -86,12 +85,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - policy = new LifecyclePolicy(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyAction.java index 42b45762722af..76996b0879931 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyAction.java @@ -9,7 +9,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.IndicesRequest; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.common.ParseField; @@ -24,17 +24,12 @@ import java.util.List; import java.util.Objects; -public class RemoveIndexLifecyclePolicyAction extends StreamableResponseActionType { +public class RemoveIndexLifecyclePolicyAction extends ActionType { public static final RemoveIndexLifecyclePolicyAction INSTANCE = new RemoveIndexLifecyclePolicyAction(); public static final String NAME = "indices:admin/ilm/remove_policy"; protected RemoveIndexLifecyclePolicyAction() { - super(NAME); - } - - @Override - public RemoveIndexLifecyclePolicyAction.Response newResponse() { - return new Response(); + super(NAME, RemoveIndexLifecyclePolicyAction.Response::new); } public static class Response extends ActionResponse implements ToXContentObject { @@ -52,7 +47,9 @@ public static class Response extends ActionResponse implements ToXContentObject private List failedIndexes; - public Response() { + public Response(StreamInput in) throws IOException { + super(in); + failedIndexes = in.readStringList(); } public Response(List failedIndexes) { @@ -79,12 +76,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - failedIndexes = in.readStringList(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeStringCollection(failedIndexes); @@ -114,6 +105,12 @@ public static class Request extends AcknowledgedRequest implements Indi private String[] indices; private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen(); + public Request(StreamInput in) throws IOException { + super(in); + indices = in.readStringArray(); + indicesOptions = IndicesOptions.readIndicesOptions(in); + } + public Request() { } @@ -148,13 +145,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - 
public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - indices = in.readStringArray(); - indicesOptions = IndicesOptions.readIndicesOptions(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/RetryAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/RetryAction.java index f9bcb3474fbec..00fc8a5fd6ad6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/RetryAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/RetryAction.java @@ -6,8 +6,8 @@ package org.elasticsearch.xpack.core.indexlifecycle.action; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; @@ -15,7 +15,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentObject; import java.io.IOException; @@ -27,12 +26,7 @@ public class RetryAction extends ActionType { public static final String NAME = "indices:admin/ilm/retry"; protected RetryAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return Response::new; + super(NAME, RetryAction.Response::new); } public static class Response extends AcknowledgedResponse implements ToXContentObject { @@ -54,6 +48,12 @@ public Request(String... 
indices) { this.indices = indices; } + public Request(StreamInput in) throws IOException { + super(in); + this.indices = in.readStringArray(); + this.indicesOptions = IndicesOptions.readIndicesOptions(in); + } + public Request() { } @@ -83,13 +83,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - this.indices = in.readStringArray(); - this.indicesOptions = IndicesOptions.readIndicesOptions(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StartILMAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StartILMAction.java index 2145c5cd53d36..d06039cc16287 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StartILMAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StartILMAction.java @@ -8,19 +8,12 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.common.io.stream.Writeable; public class StartILMAction extends ActionType { public static final StartILMAction INSTANCE = new StartILMAction(); public static final String NAME = "cluster:admin/ilm/start"; protected StartILMAction() { - super(NAME); + super(NAME, AcknowledgedResponse::new); } - - @Override - public Writeable.Reader getResponseReader() { - return AcknowledgedResponse::new; - } - } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StopILMAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StopILMAction.java index 09228322fce58..48f711fadc96c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StopILMAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StopILMAction.java @@ -8,19 +8,12 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.common.io.stream.Writeable; public class StopILMAction extends ActionType { public static final StopILMAction INSTANCE = new StopILMAction(); public static final String NAME = "cluster:admin/ilm/stop"; protected StopILMAction() { - super(NAME); + super(NAME, AcknowledgedResponse::new); } - - @Override - public Writeable.Reader getResponseReader() { - return AcknowledgedResponse::new; - } - } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java index 9ac63f026b089..11dc2e77c27f4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java @@ -193,7 +193,7 @@ public static Set unallocatedJobIds(@Nullable PersistentTasksCustomMetaD * @param nodes The cluster nodes * @return Unallocated job tasks */ - public static Collection unallocatedJobTasks( + public static Collection> unallocatedJobTasks( @Nullable PersistentTasksCustomMetaData tasks, DiscoveryNodes nodes) { if (tasks == null) { @@ -247,7 +247,7 @@ public static Set unallocatedDatafeedIds(@Nullable PersistentTasksCustom * @param nodes The cluster nodes * @return 
Unallocated datafeed tasks */ - public static Collection unallocatedDatafeedTasks( + public static Collection> unallocatedDatafeedTasks( @Nullable PersistentTasksCustomMetaData tasks, DiscoveryNodes nodes) { if (tasks == null) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CloseJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CloseJobAction.java index 1ee538bad6ce5..4e19af64758ac 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CloseJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CloseJobAction.java @@ -32,12 +32,7 @@ public class CloseJobAction extends ActionType { public static final String NAME = "cluster:admin/xpack/ml/job/close"; private CloseJobAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return Response::new; + super(NAME, CloseJobAction.Response::new); } public static class Request extends BaseTasksRequest implements ToXContentObject { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarAction.java index 044f13001e739..911584ac9fbd4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarAction.java @@ -5,15 +5,14 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.xpack.core.ml.calendars.Calendar; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; @@ -26,12 +25,7 @@ public class DeleteCalendarAction extends ActionType { public static final String NAME = "cluster:admin/xpack/ml/calendars/delete"; private DeleteCalendarAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return AcknowledgedResponse::new; + super(NAME, AcknowledgedResponse::new); } public static class Request extends AcknowledgedRequest { @@ -39,6 +33,11 @@ public static class Request extends AcknowledgedRequest { private String calendarId; + public Request(StreamInput in) throws IOException { + super(in); + calendarId = in.readString(); + } + public Request() { } @@ -55,12 +54,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - calendarId = in.readString(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarEventAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarEventAction.java index a39a0cec3c9ab..8502df128463b 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarEventAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarEventAction.java @@ -5,15 +5,14 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.xpack.core.ml.calendars.Calendar; import org.elasticsearch.xpack.core.ml.calendars.ScheduledEvent; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; @@ -27,18 +26,19 @@ public class DeleteCalendarEventAction extends ActionType public static final String NAME = "cluster:admin/xpack/ml/calendars/events/delete"; private DeleteCalendarEventAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return AcknowledgedResponse::new; + super(NAME, AcknowledgedResponse::new); } public static class Request extends AcknowledgedRequest { private String calendarId; private String eventId; + public Request(StreamInput in) throws IOException { + super(in); + calendarId = in.readString(); + eventId = in.readString(); + } + public Request() { } @@ -60,13 +60,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - calendarId = in.readString(); - eventId = in.readString(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDataFrameAnalyticsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDataFrameAnalyticsAction.java index 7aea3fc4298e4..c9c16025bb69c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDataFrameAnalyticsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDataFrameAnalyticsAction.java @@ -5,15 +5,14 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; @@ -28,12 +27,7 @@ public class DeleteDataFrameAnalyticsAction extends ActionType getResponseReader() { - return AcknowledgedResponse::new; + super(NAME, AcknowledgedResponse::new); } public static 
class Request extends AcknowledgedRequest implements ToXContentFragment { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedAction.java index 34640a4b5a8e1..7d8817a195e7a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedAction.java @@ -5,8 +5,8 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; @@ -14,7 +14,6 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; @@ -29,12 +28,7 @@ public class DeleteDatafeedAction extends ActionType { public static final String NAME = "cluster:admin/xpack/ml/datafeeds/delete"; private DeleteDatafeedAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return AcknowledgedResponse::new; + super(NAME, AcknowledgedResponse::new); } public static class Request extends AcknowledgedRequest implements ToXContentFragment { @@ -51,6 +45,12 @@ public Request(String datafeedId) { public Request() { } + public Request(StreamInput in) throws IOException { + super(in); + datafeedId = in.readString(); + force = in.readBoolean(); + } + public String getDatafeedId() { return datafeedId; } @@ -68,13 +68,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - datafeedId = in.readString(); - force = in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteExpiredDataAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteExpiredDataAction.java index 20153f88b3ab0..5810b946e96ed 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteExpiredDataAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteExpiredDataAction.java @@ -9,7 +9,7 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; @@ -20,24 +20,23 @@ import java.io.IOException; import java.util.Objects; -public class DeleteExpiredDataAction extends StreamableResponseActionType { +public class DeleteExpiredDataAction extends ActionType { public 
static final DeleteExpiredDataAction INSTANCE = new DeleteExpiredDataAction(); public static final String NAME = "cluster:admin/xpack/ml/delete_expired_data"; private DeleteExpiredDataAction() { - super(NAME); - } - - @Override - public Response newResponse() { - return new Response(); + super(NAME, Response::new); } public static class Request extends ActionRequest { public Request() {} + public Request(StreamInput in) throws IOException { + super(in); + } + @Override public ActionRequestValidationException validate() { return null; @@ -61,11 +60,8 @@ public Response(boolean deleted) { this.deleted = deleted; } - public Response() {} - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); + public Response(StreamInput in) throws IOException { + super(in); deleted = in.readBoolean(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteFilterAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteFilterAction.java index ff591b97d0d5a..9630615c3cf53 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteFilterAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteFilterAction.java @@ -5,8 +5,8 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; @@ -14,7 +14,6 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import java.io.IOException; @@ -27,12 +26,7 @@ public class DeleteFilterAction extends ActionType { public static final String NAME = "cluster:admin/xpack/ml/filters/delete"; private DeleteFilterAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return AcknowledgedResponse::new; + super(NAME, AcknowledgedResponse::new); } public static class Request extends AcknowledgedRequest { @@ -41,6 +35,11 @@ public static class Request extends AcknowledgedRequest { private String filterId; + public Request(StreamInput in) throws IOException { + super(in); + filterId = in.readString(); + } + public Request() { } @@ -58,12 +57,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - filterId = in.readString(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteForecastAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteForecastAction.java index 98dcc43ac5a99..fb1a3f39da3d1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteForecastAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteForecastAction.java @@ -5,15 +5,14 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.action.ActionType; 
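
The DeleteExpiredDataAction hunk above is the template for every conversion in this diff: the no-arg response constructor plus readFrom(StreamInput) pair gives way to a constructor that deserializes directly, and the action registers that constructor as its Writeable.Reader. A minimal sketch of the resulting shape, using a hypothetical MyAction with a single invented boolean field, against the 7.x ActionType and ActionResponse APIs shown here:

    import org.elasticsearch.action.ActionResponse;
    import org.elasticsearch.action.ActionType;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;

    import java.io.IOException;

    public class MyAction extends ActionType<MyAction.Response> {

        public static final MyAction INSTANCE = new MyAction();
        public static final String NAME = "cluster:admin/xpack/my_action";

        private MyAction() {
            // The Writeable.Reader is registered here instead of being
            // exposed through an overridden getResponseReader().
            super(NAME, Response::new);
        }

        public static class Response extends ActionResponse {

            private final boolean deleted; // can be final now: no readFrom mutation

            public Response(boolean deleted) {
                this.deleted = deleted;
            }

            public Response(StreamInput in) throws IOException {
                super(in);
                deleted = in.readBoolean(); // same wire format the old readFrom read
            }

            @Override
            public void writeTo(StreamOutput out) throws IOException {
                out.writeBoolean(deleted);
            }
        }
    }

Because the reader is fixed at construction time, the response can make its fields final, which the mutate-after-construction Streamable pattern never allowed.
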
import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.results.ForecastRequestStats; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; @@ -26,12 +25,7 @@ public class DeleteForecastAction extends ActionType { public static final String NAME = "cluster:admin/xpack/ml/job/forecast/delete"; private DeleteForecastAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return AcknowledgedResponse::new; + super(NAME, AcknowledgedResponse::new); } public static class Request extends AcknowledgedRequest { @@ -40,6 +34,13 @@ public static class Request extends AcknowledgedRequest { private String forecastId; private boolean allowNoForecasts = true; + public Request(StreamInput in) throws IOException { + super(in); + jobId = in.readString(); + forecastId = in.readString(); + allowNoForecasts = in.readBoolean(); + } + public Request() { } @@ -69,14 +70,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - jobId = in.readString(); - forecastId = in.readString(); - allowNoForecasts = in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteJobAction.java index 14a8df580b83b..2d3c8bf7f0eed 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteJobAction.java @@ -5,15 +5,14 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.xpack.core.ml.job.config.Job; @@ -30,12 +29,7 @@ public class DeleteJobAction extends ActionType { public static final String NAME = "cluster:admin/xpack/ml/job/delete"; private DeleteJobAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return AcknowledgedResponse::new; + super(NAME, AcknowledgedResponse::new); } public static class Request extends AcknowledgedRequest { @@ -54,6 +48,12 @@ public Request(String jobId) { public Request() {} + public Request(StreamInput in) 
throws IOException { + super(in); + jobId = in.readString(); + force = in.readBoolean(); + } + public String getJobId() { return jobId; } @@ -92,13 +92,6 @@ public Task createTask(long id, String type, String action, TaskId parentTaskId, return new JobDeletionTask(id, type, action, "delete-job-" + jobId, parentTaskId, headers); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - jobId = in.readString(); - force = in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteModelSnapshotAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteModelSnapshotAction.java index c97739ac77ee8..1c9672aaddf19 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteModelSnapshotAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteModelSnapshotAction.java @@ -5,15 +5,14 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshotField; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; @@ -26,12 +25,7 @@ public class DeleteModelSnapshotAction extends ActionType public static final String NAME = "cluster:admin/xpack/ml/job/model_snapshots/delete"; private DeleteModelSnapshotAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return AcknowledgedResponse::new; + super(NAME, AcknowledgedResponse::new); } public static class Request extends ActionRequest { @@ -42,6 +36,12 @@ public static class Request extends ActionRequest { public Request() { } + public Request(StreamInput in) throws IOException { + super(in); + jobId = in.readString(); + snapshotId = in.readString(); + } + public Request(String jobId, String snapshotId) { this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName()); this.snapshotId = ExceptionsHelper.requireNonNull(snapshotId, ModelSnapshotField.SNAPSHOT_ID.getPreferredName()); @@ -60,13 +60,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - jobId = in.readString(); - snapshotId = in.readString(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/EvaluateDataFrameAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/EvaluateDataFrameAction.java index 9be2c08fce003..04b5d084a768d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/EvaluateDataFrameAction.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/EvaluateDataFrameAction.java @@ -9,7 +9,7 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; @@ -29,18 +29,13 @@ import java.util.List; import java.util.Objects; -public class EvaluateDataFrameAction extends StreamableResponseActionType { +public class EvaluateDataFrameAction extends ActionType { public static final EvaluateDataFrameAction INSTANCE = new EvaluateDataFrameAction(); public static final String NAME = "cluster:monitor/xpack/ml/data_frame/evaluate"; private EvaluateDataFrameAction() { - super(NAME); - } - - @Override - public Response newResponse() { - return new Response(); + super(NAME, EvaluateDataFrameAction.Response::new); } public static class Request extends ActionRequest implements ToXContentObject { @@ -79,6 +74,12 @@ private Request(List indices, Evaluation evaluation) { public Request() { } + public Request(StreamInput in) throws IOException { + super(in); + indices = in.readStringArray(); + evaluation = in.readNamedWriteable(Evaluation.class); + } + public String[] getIndices() { return indices; } @@ -104,13 +105,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - indices = in.readStringArray(); - evaluation = in.readNamedWriteable(Evaluation.class); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -155,7 +149,10 @@ public static class Response extends ActionResponse implements ToXContentObject private String evaluationName; private List metrics; - public Response() { + public Response(StreamInput in) throws IOException { + super(in); + this.evaluationName = in.readString(); + this.metrics = in.readNamedWriteableList(EvaluationMetricResult.class); } public Response(String evaluationName, List metrics) { @@ -163,13 +160,6 @@ public Response(String evaluationName, List metrics) { this.metrics = Objects.requireNonNull(metrics); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - this.evaluationName = in.readString(); - this.metrics = in.readNamedWriteableList(EvaluationMetricResult.class); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(evaluationName); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FinalizeJobExecutionAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FinalizeJobExecutionAction.java index e007ca42602a7..4dad5bb6dce41 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FinalizeJobExecutionAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FinalizeJobExecutionAction.java @@ -5,15 +5,14 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedResponse; import 
org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import java.io.IOException; @@ -23,12 +22,7 @@ public class FinalizeJobExecutionAction extends ActionType public static final String NAME = "cluster:internal/xpack/ml/job/finalize_job_execution"; private FinalizeJobExecutionAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return AcknowledgedResponse::new; + super(NAME, AcknowledgedResponse::new); } public static class Request extends MasterNodeRequest { @@ -42,14 +36,13 @@ public Request(String[] jobIds) { public Request() { } - public String[] getJobIds() { - return jobIds; + public Request(StreamInput in) throws IOException { + super(in); + jobIds = in.readStringArray(); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - jobIds = in.readStringArray(); + public String[] getJobIds() { + return jobIds; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureAction.java index 7baa907f2e521..a55764395c744 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureAction.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.bytes.BytesReference; @@ -31,18 +31,13 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; -public class FindFileStructureAction extends StreamableResponseActionType { +public class FindFileStructureAction extends ActionType { public static final FindFileStructureAction INSTANCE = new FindFileStructureAction(); public static final String NAME = "cluster:monitor/xpack/ml/findfilestructure"; private FindFileStructureAction() { - super(NAME); - } - - @Override - public Response newResponse() { - return new Response(); + super(NAME, Response::new); } static class RequestBuilder extends ActionRequestBuilder { @@ -60,16 +55,8 @@ public Response(FileStructure fileStructure) { this.fileStructure = fileStructure; } - Response() { - } - - public FileStructure getFileStructure() { - return fileStructure; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); + Response(StreamInput in) throws IOException { + super(in); fileStructure = new FileStructure(in); } @@ -148,6 +135,27 @@ public static class Request extends ActionRequest { public Request() { } + public Request(StreamInput in) throws IOException { + super(in); + linesToSample = in.readOptionalVInt(); + if (in.getVersion().onOrAfter(Version.V_7_3_0)) { + lineMergeSizeLimit = in.readOptionalVInt(); + } + timeout = in.readOptionalTimeValue(); + charset = in.readOptionalString(); + format = 
in.readBoolean() ? in.readEnum(FileStructure.Format.class) : null; + columnNames = in.readBoolean() ? in.readStringList() : null; + hasHeaderRow = in.readOptionalBoolean(); + delimiter = in.readBoolean() ? (char) in.readVInt() : null; + quote = in.readBoolean() ? (char) in.readVInt() : null; + shouldTrimFields = in.readOptionalBoolean(); + grokPattern = in.readOptionalString(); + timestampFormat = in.readOptionalString(); + timestampField = in.readOptionalString(); + sample = in.readBytesReference(); + } + + public Integer getLinesToSample() { return linesToSample; } @@ -334,32 +342,11 @@ public ActionRequestValidationException validate() { return validationException; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - linesToSample = in.readOptionalVInt(); - if (in.getVersion().onOrAfter(Version.CURRENT)) { - lineMergeSizeLimit = in.readOptionalVInt(); - } - timeout = in.readOptionalTimeValue(); - charset = in.readOptionalString(); - format = in.readBoolean() ? in.readEnum(FileStructure.Format.class) : null; - columnNames = in.readBoolean() ? in.readStringList() : null; - hasHeaderRow = in.readOptionalBoolean(); - delimiter = in.readBoolean() ? (char) in.readVInt() : null; - quote = in.readBoolean() ? (char) in.readVInt() : null; - shouldTrimFields = in.readOptionalBoolean(); - grokPattern = in.readOptionalString(); - timestampFormat = in.readOptionalString(); - timestampField = in.readOptionalString(); - sample = in.readBytesReference(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeOptionalVInt(linesToSample); - if (out.getVersion().onOrAfter(Version.CURRENT)) { + if (out.getVersion().onOrAfter(Version.V_7_3_0)) { out.writeOptionalVInt(lineMergeSizeLimit); } out.writeOptionalTimeValue(timeout); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java index f549049efa7da..99f8aca8952ca 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java @@ -31,12 +31,7 @@ public class FlushJobAction extends ActionType { public static final String NAME = "cluster:admin/xpack/ml/job/flush"; private FlushJobAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return Response::new; + super(NAME, FlushJobAction.Response::new); } public static class Request extends JobTaskRequest implements ToXContentObject { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ForecastJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ForecastJobAction.java index 0e4d9f1028310..574ca2dc271bd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ForecastJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ForecastJobAction.java @@ -30,12 +30,7 @@ public class ForecastJobAction extends ActionType { public static final String NAME = "cluster:admin/xpack/ml/job/forecast"; private ForecastJobAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return Response::new; + super(NAME, ForecastJobAction.Response::new); } public static class Request extends JobTaskRequest implements ToXContentObject { diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetBucketsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetBucketsAction.java index 860cdc9b79911..9c16439abc237 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetBucketsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetBucketsAction.java @@ -8,7 +8,7 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; @@ -28,18 +28,13 @@ import java.io.IOException; import java.util.Objects; -public class GetBucketsAction extends StreamableResponseActionType { +public class GetBucketsAction extends ActionType { public static final GetBucketsAction INSTANCE = new GetBucketsAction(); public static final String NAME = "cluster:monitor/xpack/ml/job/results/buckets/get"; private GetBucketsAction() { - super(NAME); - } - - @Override - public Response newResponse() { - return new Response(); + super(NAME, Response::new); } public static class Request extends ActionRequest implements ToXContentObject { @@ -90,6 +85,20 @@ public static Request parseRequest(String jobId, XContentParser parser) { public Request() { } + public Request(StreamInput in) throws IOException { + super(in); + jobId = in.readString(); + timestamp = in.readOptionalString(); + expand = in.readBoolean(); + excludeInterim = in.readBoolean(); + start = in.readOptionalString(); + end = in.readOptionalString(); + anomalyScore = in.readOptionalDouble(); + pageParams = in.readOptionalWriteable(PageParams::new); + sort = in.readString(); + descending = in.readBoolean(); + } + public Request(String jobId) { this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName()); } @@ -199,21 +208,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - jobId = in.readString(); - timestamp = in.readOptionalString(); - expand = in.readBoolean(); - excludeInterim = in.readBoolean(); - start = in.readOptionalString(); - end = in.readOptionalString(); - anomalyScore = in.readOptionalDouble(); - pageParams = in.readOptionalWriteable(PageParams::new); - sort = in.readString(); - descending = in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -292,7 +286,8 @@ static class RequestBuilder extends ActionRequestBuilder { public static class Response extends AbstractGetResourcesResponse implements ToXContentObject { - public Response() { + public Response(StreamInput in) throws IOException { + super(in); } public Response(QueryPage buckets) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetCalendarEventsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetCalendarEventsAction.java index 1e4a4f60ae72c..ac2839373dc16 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetCalendarEventsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetCalendarEventsAction.java @@ -8,7 +8,7 @@ 
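
The GetBucketsAction.Request conversion that follows moves a long ordered field list from readFrom into the constructor, which highlights the one invariant the whole migration preserves: the StreamInput constructor must read fields in exactly the order writeTo writes them, or the stream desynchronizes. A reduced sketch of that symmetry, with a hypothetical SketchRequest and invented fields:

    import org.elasticsearch.action.ActionRequest;
    import org.elasticsearch.action.ActionRequestValidationException;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;

    import java.io.IOException;

    public class SketchRequest extends ActionRequest {

        private final String jobId;
        private final String start;      // optional on the wire
        private final boolean descending;

        public SketchRequest(StreamInput in) throws IOException {
            super(in);
            jobId = in.readString();         // 1
            start = in.readOptionalString(); // 2
            descending = in.readBoolean();   // 3
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            out.writeString(jobId);          // 1: mirrors readString
            out.writeOptionalString(start);  // 2: mirrors readOptionalString
            out.writeBoolean(descending);    // 3: mirrors readBoolean
        }

        @Override
        public ActionRequestValidationException validate() {
            return null;
        }
    }
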
import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ValidateActions; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.ParseField; @@ -29,17 +29,12 @@ import java.io.IOException; import java.util.Objects; -public class GetCalendarEventsAction extends StreamableResponseActionType { +public class GetCalendarEventsAction extends ActionType { public static final GetCalendarEventsAction INSTANCE = new GetCalendarEventsAction(); public static final String NAME = "cluster:monitor/xpack/ml/calendars/events/get"; private GetCalendarEventsAction() { - super(NAME); - } - - @Override - public Response newResponse() { - return new Response(); + super(NAME, Response::new); } public static class Request extends ActionRequest implements ToXContentObject { @@ -74,6 +69,15 @@ public static Request parseRequest(String calendarId, XContentParser parser) { public Request() { } + public Request(StreamInput in) throws IOException { + super(in); + calendarId = in.readString(); + start = in.readOptionalString(); + end = in.readOptionalString(); + jobId = in.readOptionalString(); + pageParams = new PageParams(in); + } + public Request(String calendarId) { setCalendarId(calendarId); } @@ -129,16 +133,6 @@ public ActionRequestValidationException validate() { return e; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - calendarId = in.readString(); - start = in.readOptionalString(); - end = in.readOptionalString(); - jobId = in.readOptionalString(); - pageParams = new PageParams(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -196,7 +190,8 @@ public RequestBuilder(ElasticsearchClient client) { public static class Response extends AbstractGetResourcesResponse implements ToXContentObject { - public Response() { + public Response(StreamInput in) throws IOException { + super(in); } public Response(QueryPage scheduledEvents) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetCalendarsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetCalendarsAction.java index 29e2fea89a83e..22874beafe5a2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetCalendarsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetCalendarsAction.java @@ -8,7 +8,7 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -28,18 +28,13 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; -public class GetCalendarsAction extends StreamableResponseActionType { +public class GetCalendarsAction extends ActionType { public static final GetCalendarsAction INSTANCE = new GetCalendarsAction(); public static final String NAME = "cluster:monitor/xpack/ml/calendars/get"; private GetCalendarsAction() { - super(NAME); - } - - 
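
The constructors in these hunks use two idioms for nullable wire fields: readOptionalString and readOptionalWriteable where a helper exists, and a manual boolean prefix where none does, as FindFileStructureAction did above for its char-valued delimiter. A sketch of both, with an invented holder class and field names; PageParams stands in for any Writeable:

    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.xpack.core.action.util.PageParams;

    import java.io.IOException;

    final class OptionalWireFields {

        private final String jobId;        // nullable String
        private final PageParams page;     // nullable Writeable
        private final Character delimiter; // nullable char, no readOptional* helper

        OptionalWireFields(StreamInput in) throws IOException {
            jobId = in.readOptionalString();
            page = in.readOptionalWriteable(PageParams::new);
            delimiter = in.readBoolean() ? (char) in.readVInt() : null;
        }

        void writeTo(StreamOutput out) throws IOException {
            out.writeOptionalString(jobId);
            out.writeOptionalWriteable(page);
            if (delimiter == null) {
                out.writeBoolean(false);
            } else {
                out.writeBoolean(true);   // presence flag first
                out.writeVInt(delimiter); // then the value
            }
        }
    }
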
@Override - public Response newResponse() { - return new Response(); + super(NAME, Response::new); } public static class Request extends ActionRequest implements ToXContentObject { @@ -67,6 +62,12 @@ public static Request parseRequest(String calendarId, XContentParser parser) { public Request() { } + public Request(StreamInput in) throws IOException { + super(in); + calendarId = in.readOptionalString(); + pageParams = in.readOptionalWriteable(PageParams::new); + } + public void setCalendarId(String calendarId) { this.calendarId = calendarId; } @@ -96,13 +97,6 @@ public ActionRequestValidationException validate() { return validationException; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - calendarId = in.readOptionalString(); - pageParams = in.readOptionalWriteable(PageParams::new); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -154,7 +148,8 @@ public Response(QueryPage calendars) { super(calendars); } - public Response() { + public Response(StreamInput in) throws IOException { + super(in); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetCategoriesAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetCategoriesAction.java index 575a8c1b5aacf..e37cbd77bc69c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetCategoriesAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetCategoriesAction.java @@ -8,7 +8,7 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; @@ -29,18 +29,13 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; -public class GetCategoriesAction extends StreamableResponseActionType { +public class GetCategoriesAction extends ActionType { public static final GetCategoriesAction INSTANCE = new GetCategoriesAction(); public static final String NAME = "cluster:monitor/xpack/ml/job/results/categories/get"; private GetCategoriesAction() { - super(NAME); - } - - @Override - public Response newResponse() { - return new Response(); + super(NAME, Response::new); } public static class Request extends ActionRequest implements ToXContentObject { @@ -76,6 +71,13 @@ public Request(String jobId) { public Request() { } + public Request(StreamInput in) throws IOException { + super(in); + jobId = in.readString(); + categoryId = in.readOptionalLong(); + pageParams = in.readOptionalWriteable(PageParams::new); + } + public String getJobId() { return jobId; } public PageParams getPageParams() { return pageParams; } @@ -109,14 +111,6 @@ public ActionRequestValidationException validate() { return validationException; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - jobId = in.readString(); - categoryId = in.readOptionalLong(); - pageParams = in.readOptionalWriteable(PageParams::new); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -170,7 +164,8 @@ public Response(QueryPage result) { super(result); } - public Response() { + public Response(StreamInput 
in) throws IOException { + super(in); } public QueryPage getResult() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDataFrameAnalyticsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDataFrameAnalyticsAction.java index 3873cdac96428..efa6430489b7d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDataFrameAnalyticsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDataFrameAnalyticsAction.java @@ -6,7 +6,7 @@ package org.elasticsearch.xpack.core.ml.action; import org.elasticsearch.action.ActionRequestBuilder; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; @@ -16,20 +16,14 @@ import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; import java.io.IOException; -import java.util.Collections; -public class GetDataFrameAnalyticsAction extends StreamableResponseActionType { +public class GetDataFrameAnalyticsAction extends ActionType { public static final GetDataFrameAnalyticsAction INSTANCE = new GetDataFrameAnalyticsAction(); public static final String NAME = "cluster:monitor/xpack/ml/data_frame/analytics/get"; private GetDataFrameAnalyticsAction() { - super(NAME); - } - - @Override - public Response newResponse() { - return new Response(new QueryPage<>(Collections.emptyList(), 0, Response.RESULTS_FIELD)); + super(NAME, Response::new); } public static class Request extends AbstractGetResourcesRequest { @@ -46,7 +40,7 @@ public Request(String id) { } public Request(StreamInput in) throws IOException { - readFrom(in); + super(in); } @Override @@ -59,7 +53,9 @@ public static class Response extends AbstractGetResourcesResponse analytics) { super(analytics); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDataFrameAnalyticsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDataFrameAnalyticsStatsAction.java index a0e70463c52e1..fb67cb0f965b0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDataFrameAnalyticsStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDataFrameAnalyticsStatsAction.java @@ -41,12 +41,7 @@ public class GetDataFrameAnalyticsStatsAction extends ActionType getResponseReader() { - return Response::new; + super(NAME, GetDataFrameAnalyticsStatsAction.Response::new); } public static class Request extends BaseTasksRequest { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsAction.java index 435434ab93d38..69abd50a2ae4c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsAction.java @@ -6,7 +6,7 @@ package org.elasticsearch.xpack.core.ml.action; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; import 
org.elasticsearch.action.support.master.MasterNodeReadRequest; import org.elasticsearch.client.ElasticsearchClient; @@ -22,7 +22,7 @@ import java.io.IOException; import java.util.Objects; -public class GetDatafeedsAction extends StreamableResponseActionType { +public class GetDatafeedsAction extends ActionType { public static final GetDatafeedsAction INSTANCE = new GetDatafeedsAction(); public static final String NAME = "cluster:monitor/xpack/ml/datafeeds/get"; @@ -30,12 +30,7 @@ public class GetDatafeedsAction extends StreamableResponseActionType { @@ -84,11 +79,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public int hashCode() { return Objects.hash(datafeedId, allowNoDatafeeds); @@ -120,7 +110,9 @@ public Response(QueryPage datafeeds) { super(datafeeds); } - public Response() {} + public Response(StreamInput in) throws IOException { + super(in); + } public QueryPage getResponse() { return getResources(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsStatsAction.java index e07ad3ba31dc5..91c2eee6a27cb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsStatsAction.java @@ -6,7 +6,7 @@ package org.elasticsearch.xpack.core.ml.action; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; import org.elasticsearch.action.support.master.MasterNodeReadRequest; import org.elasticsearch.client.ElasticsearchClient; @@ -24,28 +24,28 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedTimingStats; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; import java.io.IOException; +import java.util.Collections; import java.util.Map; import java.util.Objects; import static org.elasticsearch.Version.V_7_4_0; -public class GetDatafeedsStatsAction extends StreamableResponseActionType { +public class GetDatafeedsStatsAction extends ActionType { public static final GetDatafeedsStatsAction INSTANCE = new GetDatafeedsStatsAction(); public static final String NAME = "cluster:monitor/xpack/ml/datafeeds/stats/get"; public static final String ALL = "_all"; private static final String STATE = "state"; + private static final String NODE = "node"; + private static final String ASSIGNMENT_EXPLANATION = "assignment_explanation"; + private static final String TIMING_STATS = "timing_stats"; private GetDatafeedsStatsAction() { - super(NAME); - } - - @Override - public Response newResponse() { - return new Response(); + super(NAME, Response::new); } public static class Request extends MasterNodeReadRequest { @@ -91,11 +91,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - 
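
For the Get*Action responses backed by a QueryPage, as in the GetDatafeeds conversions here, the subclass no longer carries any wire logic of its own: it simply forwards the StreamInput to AbstractGetResourcesResponse, which reads the page. A sketch with a hypothetical subclass name, reusing DatafeedConfig from this diff as a resource type that is both ToXContentObject and Writeable:

    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.xpack.core.action.AbstractGetResourcesResponse;
    import org.elasticsearch.xpack.core.action.util.QueryPage;
    import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;

    import java.io.IOException;

    public class SketchResponse extends AbstractGetResourcesResponse<DatafeedConfig> {

        public SketchResponse(StreamInput in) throws IOException {
            super(in); // the parent deserializes the whole QueryPage<DatafeedConfig>
        }

        public SketchResponse(QueryPage<DatafeedConfig> page) {
            super(page);
        }
    }
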
@Override public int hashCode() { return Objects.hash(datafeedId, allowNoDatafeeds); @@ -181,7 +176,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(DatafeedConfig.ID.getPreferredName(), datafeedId); builder.field(STATE, datafeedState.toString()); if (node != null) { - builder.startObject("node"); + builder.startObject(NODE); builder.field("id", node.getId()); builder.field("name", node.getName()); builder.field("ephemeral_id", node.getEphemeralId()); @@ -197,10 +192,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endObject(); } if (assignmentExplanation != null) { - builder.field("assignment_explanation", assignmentExplanation); + builder.field(ASSIGNMENT_EXPLANATION, assignmentExplanation); } if (timingStats != null) { - builder.field("timing_stats", timingStats); + builder.field( + TIMING_STATS, + timingStats, + new MapParams(Collections.singletonMap(ToXContentParams.INCLUDE_CALCULATED_FIELDS, "true"))); } builder.endObject(); return builder; @@ -243,7 +241,9 @@ public Response(QueryPage datafeedsStats) { super(datafeedsStats); } - public Response() {} + public Response(StreamInput in) throws IOException { + super(in); + } public QueryPage getResponse() { return getResources(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetFiltersAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetFiltersAction.java index d3ec3f2e32455..de22800832dfc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetFiltersAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetFiltersAction.java @@ -7,8 +7,9 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.StatusToXContentObject; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.core.action.AbstractGetResourcesRequest; @@ -17,21 +18,18 @@ import org.elasticsearch.xpack.core.action.util.QueryPage; import org.elasticsearch.xpack.core.ml.job.config.MlFilter; +import java.io.IOException; + import static org.elasticsearch.action.ValidateActions.addValidationError; -public class GetFiltersAction extends StreamableResponseActionType { +public class GetFiltersAction extends ActionType { public static final GetFiltersAction INSTANCE = new GetFiltersAction(); public static final String NAME = "cluster:admin/xpack/ml/filters/get"; private GetFiltersAction() { - super(NAME); - } - - @Override - public Response newResponse() { - return new Response(); + super(NAME, Response::new); } public static class Request extends AbstractGetResourcesRequest { @@ -41,6 +39,10 @@ public Request() { super(null, null, true); } + public Request(StreamInput in) throws IOException { + super(in); + } + public void setFilterId(String filterId) { setResourceId(filterId); } @@ -79,7 +81,8 @@ public Response(QueryPage filters) { super(filters); } - public Response() { + public Response(StreamInput in) throws IOException { + super(in); } public QueryPage getFilters() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetInfluencersAction.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetInfluencersAction.java index 2042d581a2239..e70af2ddf2748 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetInfluencersAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetInfluencersAction.java @@ -8,7 +8,7 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; @@ -27,18 +27,13 @@ import java.io.IOException; import java.util.Objects; -public class GetInfluencersAction extends StreamableResponseActionType { +public class GetInfluencersAction extends ActionType { public static final GetInfluencersAction INSTANCE = new GetInfluencersAction(); public static final String NAME = "cluster:monitor/xpack/ml/job/results/influencers/get"; private GetInfluencersAction() { - super(NAME); - } - - @Override - public Response newResponse() { - return new Response(); + super(NAME, Response::new); } public static class Request extends ActionRequest implements ToXContentObject { @@ -83,6 +78,18 @@ public static Request parseRequest(String jobId, XContentParser parser) { public Request() { } + public Request(StreamInput in) throws IOException { + super(in); + jobId = in.readString(); + excludeInterim = in.readBoolean(); + pageParams = new PageParams(in); + start = in.readOptionalString(); + end = in.readOptionalString(); + sort = in.readOptionalString(); + descending = in.readBoolean(); + influencerScore = in.readDouble(); + } + public Request(String jobId) { this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName()); } @@ -152,19 +159,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - jobId = in.readString(); - excludeInterim = in.readBoolean(); - pageParams = new PageParams(in); - start = in.readOptionalString(); - end = in.readOptionalString(); - sort = in.readOptionalString(); - descending = in.readBoolean(); - influencerScore = in.readDouble(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -229,6 +223,10 @@ public static class Response extends AbstractGetResourcesResponse im public Response() { } + public Response(StreamInput in) throws IOException { + super(in); + } + public Response(QueryPage influencers) { super(influencers); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsAction.java index 5d92a356d551c..a1080b375011c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsAction.java @@ -6,7 +6,7 @@ package org.elasticsearch.xpack.core.ml.action; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; import 
org.elasticsearch.action.support.master.MasterNodeReadRequest; import org.elasticsearch.client.ElasticsearchClient; @@ -22,18 +22,13 @@ import java.io.IOException; import java.util.Objects; -public class GetJobsAction extends StreamableResponseActionType { +public class GetJobsAction extends ActionType { public static final GetJobsAction INSTANCE = new GetJobsAction(); public static final String NAME = "cluster:monitor/xpack/ml/job/get"; private GetJobsAction() { - super(NAME); - } - - @Override - public Response newResponse() { - return new Response(); + super(NAME, Response::new); } public static class Request extends MasterNodeReadRequest { @@ -82,11 +77,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public int hashCode() { return Objects.hash(jobId, allowNoJobs); @@ -118,7 +108,9 @@ public Response(QueryPage jobs) { super(jobs); } - public Response() {} + public Response(StreamInput in) throws IOException { + super(in); + } public QueryPage getResponse() { return getResources(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java index 558b166315e80..3b3152e9a48a4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java @@ -32,6 +32,7 @@ import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.TimingStats; import org.elasticsearch.xpack.core.ml.stats.ForecastStats; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; import java.io.IOException; import java.util.Collections; @@ -51,15 +52,12 @@ public class GetJobsStatsAction extends ActionType private static final String FORECASTS_STATS = "forecasts_stats"; private static final String STATE = "state"; private static final String NODE = "node"; + private static final String ASSIGNMENT_EXPLANATION = "assignment_explanation"; + private static final String OPEN_TIME = "open_time"; private static final String TIMING_STATS = "timing_stats"; private GetJobsStatsAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return Response::new; + super(NAME, GetJobsStatsAction.Response::new); } public static class Request extends BaseTasksRequest { @@ -266,13 +264,16 @@ public XContentBuilder toUnwrappedXContent(XContentBuilder builder) throws IOExc builder.endObject(); } if (assignmentExplanation != null) { - builder.field("assignment_explanation", assignmentExplanation); + builder.field(ASSIGNMENT_EXPLANATION, assignmentExplanation); } if (openTime != null) { - builder.field("open_time", openTime.getStringRep()); + builder.field(OPEN_TIME, openTime.getStringRep()); } if (timingStats != null) { - builder.field(TIMING_STATS, timingStats); + builder.field( + TIMING_STATS, + timingStats, + new MapParams(Collections.singletonMap(ToXContentParams.INCLUDE_CALCULATED_FIELDS, "true"))); } return builder; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetModelSnapshotsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetModelSnapshotsAction.java index 
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetModelSnapshotsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetModelSnapshotsAction.java
index 0b1bee54a3a0d..ddd374dc2bfc7 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetModelSnapshotsAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetModelSnapshotsAction.java
@@ -8,7 +8,7 @@
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionRequestValidationException;
-import org.elasticsearch.action.StreamableResponseActionType;
+import org.elasticsearch.action.ActionType;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.ParseField;
@@ -28,18 +28,13 @@
 import java.io.IOException;
 import java.util.Objects;
 
-public class GetModelSnapshotsAction extends StreamableResponseActionType<GetModelSnapshotsAction.Response> {
+public class GetModelSnapshotsAction extends ActionType<GetModelSnapshotsAction.Response> {
 
     public static final GetModelSnapshotsAction INSTANCE = new GetModelSnapshotsAction();
     public static final String NAME = "cluster:monitor/xpack/ml/job/model_snapshots/get";
 
     private GetModelSnapshotsAction() {
-        super(NAME);
-    }
-
-    @Override
-    public GetModelSnapshotsAction.Response newResponse() {
-        return new Response();
+        super(NAME, Response::new);
     }
 
     public static class Request extends ActionRequest implements ToXContentObject {
@@ -84,6 +79,17 @@ public static Request parseRequest(String jobId, String snapshotId, XContentPars
         public Request() {
         }
 
+        public Request(StreamInput in) throws IOException {
+            super(in);
+            jobId = in.readString();
+            snapshotId = in.readOptionalString();
+            sort = in.readOptionalString();
+            start = in.readOptionalString();
+            end = in.readOptionalString();
+            desc = in.readBoolean();
+            pageParams = new PageParams(in);
+        }
+
         public Request(String jobId, String snapshotId) {
             this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName());
             this.snapshotId = snapshotId;
@@ -146,18 +152,6 @@ public ActionRequestValidationException validate() {
             return null;
         }
 
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
-            jobId = in.readString();
-            snapshotId = in.readOptionalString();
-            sort = in.readOptionalString();
-            start = in.readOptionalString();
-            end = in.readOptionalString();
-            desc = in.readBoolean();
-            pageParams = new PageParams(in);
-        }
-
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
@@ -221,7 +215,8 @@ public Response(QueryPage<ModelSnapshot> page) {
             super(page);
         }
 
-        public Response() {
+        public Response(StreamInput in) throws IOException {
+            super(in);
         }
 
         public QueryPage<ModelSnapshot> getPage() {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetOverallBucketsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetOverallBucketsAction.java
index 4b657fdf26ec7..8bde83aad02bf 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetOverallBucketsAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetOverallBucketsAction.java
@@ -9,7 +9,7 @@
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionRequestValidationException;
-import org.elasticsearch.action.StreamableResponseActionType;
+import org.elasticsearch.action.ActionType;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -29,7 +29,6 @@
 import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
 
 import java.io.IOException;
-import java.util.Collections;
 import java.util.Objects;
 import java.util.function.LongSupplier;
 
@@ -47,18 +46,13 @@
  * the interval.
  *
 */
-public class GetOverallBucketsAction extends StreamableResponseActionType<GetOverallBucketsAction.Response> {
+public class GetOverallBucketsAction extends ActionType<GetOverallBucketsAction.Response> {
 
     public static final GetOverallBucketsAction INSTANCE = new GetOverallBucketsAction();
     public static final String NAME = "cluster:monitor/xpack/ml/job/results/overall_buckets/get";
 
     private GetOverallBucketsAction() {
-        super(NAME);
-    }
-
-    @Override
-    public Response newResponse() {
-        return new Response();
+        super(NAME, Response::new);
     }
 
     public static class Request extends ActionRequest implements ToXContentObject {
@@ -117,6 +111,18 @@ public static Request parseRequest(String jobId, XContentParser parser) {
         public Request() {
         }
 
+        public Request(StreamInput in) throws IOException {
+            super(in);
+            jobId = in.readString();
+            topN = in.readVInt();
+            bucketSpan = in.readOptionalTimeValue();
+            overallScore = in.readDouble();
+            excludeInterim = in.readBoolean();
+            start = in.readOptionalLong();
+            end = in.readOptionalLong();
+            allowNoJobs = in.readBoolean();
+        }
+
         public Request(String jobId) {
             this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName());
         }
@@ -201,19 +207,6 @@ public ActionRequestValidationException validate() {
             return null;
         }
 
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
-            jobId = in.readString();
-            topN = in.readVInt();
-            bucketSpan = in.readOptionalTimeValue();
-            overallScore = in.readDouble();
-            excludeInterim = in.readBoolean();
-            start = in.readOptionalLong();
-            end = in.readOptionalLong();
-            allowNoJobs = in.readBoolean();
-        }
-
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
@@ -282,8 +275,8 @@ static class RequestBuilder extends ActionRequestBuilder<Request, Response> {
 
     public static class Response extends AbstractGetResourcesResponse<OverallBucket> implements ToXContentObject {
 
-        public Response() {
-            super(new QueryPage<>(Collections.emptyList(), 0, OverallBucket.RESULTS_FIELD));
+        public Response(StreamInput in) throws IOException {
+            super(in);
         }
 
         public Response(QueryPage<OverallBucket> overallBuckets) {
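Requests get the same treatment as responses: readFrom(StreamInput) becomes a constructor that first delegates to super(in) and then reads its own fields. The transport format is positional, so the reads must mirror writeTo exactly, field for field and in the same order. A sketch of that symmetry; jobId, excludeInterim, and start are placeholder fields, not tied to any one request in this patch:

// Inside a hypothetical Request class; read order mirrors write order below.
public Request(StreamInput in) throws IOException {
    super(in);                          // base-class state is always read first
    jobId = in.readString();            // 1st field
    excludeInterim = in.readBoolean();  // 2nd field
    start = in.readOptionalString();    // 3rd field, may be absent on the wire
}

@Override
public void writeTo(StreamOutput out) throws IOException {
    super.writeTo(out);
    out.writeString(jobId);
    out.writeBoolean(excludeInterim);
    out.writeOptionalString(start);
}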
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetRecordsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetRecordsAction.java
index c1af5da77265c..f5ea6f51a93d7 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetRecordsAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetRecordsAction.java
@@ -8,7 +8,7 @@
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionRequestValidationException;
-import org.elasticsearch.action.StreamableResponseActionType;
+import org.elasticsearch.action.ActionType;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -27,18 +27,13 @@
 import java.io.IOException;
 import java.util.Objects;
 
-public class GetRecordsAction extends StreamableResponseActionType<GetRecordsAction.Response> {
+public class GetRecordsAction extends ActionType<GetRecordsAction.Response> {
 
     public static final GetRecordsAction INSTANCE = new GetRecordsAction();
     public static final String NAME = "cluster:monitor/xpack/ml/job/results/records/get";
 
     private GetRecordsAction() {
-        super(NAME);
-    }
-
-    @Override
-    public Response newResponse() {
-        return new Response();
+        super(NAME, Response::new);
     }
 
     public static class Request extends ActionRequest implements ToXContentObject {
@@ -83,6 +78,18 @@ public static Request parseRequest(String jobId, XContentParser parser) {
         public Request() {
         }
 
+        public Request(StreamInput in) throws IOException {
+            super(in);
+            jobId = in.readString();
+            excludeInterim = in.readBoolean();
+            pageParams = new PageParams(in);
+            start = in.readOptionalString();
+            end = in.readOptionalString();
+            sort = in.readOptionalString();
+            descending = in.readBoolean();
+            recordScoreFilter = in.readDouble();
+        }
+
         public Request(String jobId) {
             this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName());
         }
@@ -151,19 +158,6 @@ public ActionRequestValidationException validate() {
             return null;
         }
 
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
-            jobId = in.readString();
-            excludeInterim = in.readBoolean();
-            pageParams = new PageParams(in);
-            start = in.readOptionalString();
-            end = in.readOptionalString();
-            sort = in.readOptionalString();
-            descending = in.readBoolean();
-            recordScoreFilter = in.readDouble();
-        }
-
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
@@ -226,7 +220,8 @@ static class RequestBuilder extends ActionRequestBuilder<Request, Response> {
 
     public static class Response extends AbstractGetResourcesResponse<AnomalyRecord> implements ToXContentObject {
 
-        public Response() {
+        public Response(StreamInput in) throws IOException {
+            super(in);
         }
 
         public Response(QueryPage<AnomalyRecord> records) {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/IsolateDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/IsolateDatafeedAction.java
index 611052717b81d..a3d8088d138ab 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/IsolateDatafeedAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/IsolateDatafeedAction.java
@@ -41,12 +41,7 @@ public class IsolateDatafeedAction extends ActionType<IsolateDatafeedAction.Response> {
 
     public static final IsolateDatafeedAction INSTANCE = new IsolateDatafeedAction();
     public static final String NAME = "cluster:internal/xpack/ml/datafeed/isolate";
 
     private IsolateDatafeedAction() {
-        super(NAME);
-    }
-
-    @Override
-    public Writeable.Reader<Response> getResponseReader() {
-        return Response::new;
+        super(NAME, IsolateDatafeedAction.Response::new);
     }
 
     public static class Request extends BaseTasksRequest<Request> implements ToXContentObject {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/KillProcessAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/KillProcessAction.java
index 410fc110d2416..dcd8c524415e7 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/KillProcessAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/KillProcessAction.java
@@ -22,12 +22,7 @@ public class KillProcessAction extends ActionType<KillProcessAction.Response> {
 
     public static final KillProcessAction INSTANCE = new KillProcessAction();
     public static final String NAME = "cluster:internal/xpack/ml/job/kill/process";
 
     private KillProcessAction() {
-        super(NAME);
-    }
-
-    @Override
-    public Writeable.Reader<Response> getResponseReader() {
-        return Response::new;
+        super(NAME, KillProcessAction.Response::new);
     }
 
     static class RequestBuilder extends ActionRequestBuilder<Request, Response> {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/MlInfoAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/MlInfoAction.java
index c02909aa761f6..e342f4a502137 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/MlInfoAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/MlInfoAction.java
@@ -9,7 +9,7 @@
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.ActionResponse;
-import org.elasticsearch.action.StreamableResponseActionType;
+import org.elasticsearch.action.ActionType;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -21,18 +21,13 @@
 import java.util.Map;
 import java.util.Objects;
 
-public class MlInfoAction extends StreamableResponseActionType<MlInfoAction.Response> {
+public class MlInfoAction extends ActionType<MlInfoAction.Response> {
 
     public static final MlInfoAction INSTANCE = new MlInfoAction();
     public static final String NAME = "cluster:monitor/xpack/ml/info/get";
 
     private MlInfoAction() {
-        super(NAME);
-    }
-
-    @Override
-    public Response newResponse() {
-        return new Response();
+        super(NAME, Response::new);
     }
 
     public static class Request extends ActionRequest {
@@ -70,12 +65,15 @@ public Response() {
             this.info = Collections.emptyMap();
         }
 
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
+        public Response(StreamInput in) throws IOException {
+            super(in);
             info = in.readMap();
         }
 
+        public Map<String, Object> getInfo() {
+            return info;
+        }
+
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             out.writeMap(info);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java
index 4f50248bbbe0e..6854e59d8f4ca 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java
@@ -6,9 +6,9 @@
 package org.elasticsearch.xpack.core.ml.action;
 
 import org.elasticsearch.Version;
-import org.elasticsearch.action.ActionType;
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.ActionType;
 import org.elasticsearch.action.support.master.AcknowledgedResponse;
 import org.elasticsearch.action.support.master.MasterNodeRequest;
 import org.elasticsearch.client.ElasticsearchClient;
@@ -18,7 +18,6 @@
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.ObjectParser;
 import org.elasticsearch.common.xcontent.ToXContent;
@@ -41,12 +40,7 @@ public class OpenJobAction extends ActionType<AcknowledgedResponse> {
 
     public static final OpenJobAction INSTANCE = new OpenJobAction();
     public static final String NAME = "cluster:admin/xpack/ml/job/open";
 
     private OpenJobAction() {
-        super(NAME);
-    }
-
-    @Override
-    public Writeable.Reader<AcknowledgedResponse> getResponseReader() {
-        return AcknowledgedResponse::new;
+        super(NAME, AcknowledgedResponse::new);
     }
 
     public static class Request extends MasterNodeRequest<Request> implements ToXContentObject {
@@ -74,7 +68,8 @@ public Request(String jobId) {
     }
 
     public Request(StreamInput in) throws IOException {
-        readFrom(in);
+        super(in);
+        jobParams = new JobParams(in);
     }
 
     public Request() {
@@ -89,12 +84,6 @@ public ActionRequestValidationException validate() {
         return null;
     }
 
-    @Override
-    public void readFrom(StreamInput in) throws IOException {
-        super.readFrom(in);
-        jobParams = new JobParams(in);
-    }
-
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
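Actions that only acknowledge, such as OpenJobAction above, need no response class of their own: AcknowledgedResponse already exposes a StreamInput constructor, so the whole migration collapses to passing AcknowledgedResponse::new to super. A sketch with a hypothetical action name:

import org.elasticsearch.action.ActionType;
import org.elasticsearch.action.support.master.AcknowledgedResponse;

public class ExampleAckAction extends ActionType<AcknowledgedResponse> {

    public static final ExampleAckAction INSTANCE = new ExampleAckAction();
    public static final String NAME = "cluster:admin/xpack/ml/example/ack"; // hypothetical

    private ExampleAckAction() {
        super(NAME, AcknowledgedResponse::new); // reuse the stock reader, no override needed
    }
}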
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PersistJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PersistJobAction.java
index e401240853a7f..3e0abcb558066 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PersistJobAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PersistJobAction.java
@@ -22,12 +22,7 @@ public class PersistJobAction extends ActionType<PersistJobAction.Response> {
 
     public static final PersistJobAction INSTANCE = new PersistJobAction();
     public static final String NAME = "cluster:admin/xpack/ml/job/persist";
 
     private PersistJobAction() {
-        super(NAME);
-    }
-
-    @Override
-    public Writeable.Reader<Response> getResponseReader() {
-        return Response::new;
+        super(NAME, PersistJobAction.Response::new);
     }
 
     public static class Request extends JobTaskRequest<Request> {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostCalendarEventsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostCalendarEventsAction.java
index 65e9280086aad..ec8427bbc2233 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostCalendarEventsAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostCalendarEventsAction.java
@@ -9,7 +9,7 @@
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.ActionResponse;
-import org.elasticsearch.action.StreamableResponseActionType;
+import org.elasticsearch.action.ActionType;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -29,19 +29,14 @@
 import java.util.Objects;
 import java.util.stream.Collectors;
 
-public class PostCalendarEventsAction extends StreamableResponseActionType<PostCalendarEventsAction.Response> {
+public class PostCalendarEventsAction extends ActionType<PostCalendarEventsAction.Response> {
 
     public static final PostCalendarEventsAction INSTANCE = new PostCalendarEventsAction();
     public static final String NAME = "cluster:admin/xpack/ml/calendars/events/post";
 
     public static final ParseField EVENTS = new ParseField("events");
 
     private PostCalendarEventsAction() {
-        super(NAME);
-    }
-
-    @Override
-    public Response newResponse() {
-        return new Response();
+        super(NAME, Response::new);
     }
 
     public static class Request extends ActionRequest {
@@ -73,6 +68,12 @@ public static Request parseRequest(String calendarId, XContentParser parser) thr
         public Request() {
         }
 
+        public Request(StreamInput in) throws IOException {
+            super(in);
+            calendarId = in.readString();
+            scheduledEvents = in.readList(ScheduledEvent::new);
+        }
+
         public Request(String calendarId, List<ScheduledEvent> scheduledEvents) {
             this.calendarId = ExceptionsHelper.requireNonNull(calendarId, Calendar.ID.getPreferredName());
             this.scheduledEvents = ExceptionsHelper.requireNonNull(scheduledEvents, EVENTS.getPreferredName());
@@ -95,13 +96,6 @@ public ActionRequestValidationException validate() {
             return null;
         }
 
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
-            calendarId = in.readString();
-            scheduledEvents = in.readList(ScheduledEvent::new);
-        }
-
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
@@ -138,19 +132,15 @@ public static class Response extends ActionResponse implements ToXContentObject
 
         private List<ScheduledEvent> scheduledEvents;
 
-        public Response() {
+        public Response(StreamInput in) throws IOException {
+            super(in);
+            in.readList(ScheduledEvent::new);
         }
 
         public Response(List<ScheduledEvent> scheduledEvents) {
             this.scheduledEvents = scheduledEvents;
         }
 
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
-            in.readList(ScheduledEvent::new);
-        }
-
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             out.writeList(scheduledEvents);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostDataAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostDataAction.java
index 6fe9c17dca99a..88a8eeefac3f4 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostDataAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostDataAction.java
@@ -30,12 +30,7 @@ public class PostDataAction extends ActionType<PostDataAction.Response> {
 
     public static final PostDataAction INSTANCE = new PostDataAction();
     public static final String NAME = "cluster:admin/xpack/ml/job/data/post";
 
     private PostDataAction() {
-        super(NAME);
-    }
-
-    @Override
-    public Writeable.Reader<Response> getResponseReader() {
-        return Response::new;
+        super(NAME, PostDataAction.Response::new);
     }
 
     static class RequestBuilder extends ActionRequestBuilder<Request, Response> {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedAction.java
index 9daa1797044fb..e238fbeb97db0 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedAction.java
@@ -9,7 +9,7 @@
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.ActionResponse;
-import org.elasticsearch.action.StreamableResponseActionType;
+import org.elasticsearch.action.ActionType;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
@@ -25,18 +25,13 @@
 import java.io.InputStream;
 import java.util.Objects;
 
-public class PreviewDatafeedAction extends StreamableResponseActionType<PreviewDatafeedAction.Response> {
+public class PreviewDatafeedAction extends ActionType<PreviewDatafeedAction.Response> {
 
     public static final PreviewDatafeedAction INSTANCE = new PreviewDatafeedAction();
     public static final String NAME = "cluster:admin/xpack/ml/datafeeds/preview";
 
     private PreviewDatafeedAction() {
-        super(NAME);
-    }
-
-    @Override
-    public Response newResponse() {
-        return new Response();
+        super(NAME, Response::new);
     }
 
     public static class Request extends ActionRequest implements ToXContentObject {
@@ -46,6 +41,11 @@ public static class Request extends ActionRequest implements ToXContentObject {
 
         public Request() {
         }
 
+        public Request(StreamInput in) throws IOException {
+            super(in);
+            datafeedId = in.readString();
+        }
+
         public Request(String datafeedId) {
             setDatafeedId(datafeedId);
         }
@@ -63,12 +63,6 @@ public ActionRequestValidationException validate() {
             return null;
         }
 
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
-            datafeedId = in.readString();
-        }
-
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
@@ -110,21 +104,17 @@ static class RequestBuilder extends ActionRequestBuilder<Request, Response> {
 
     public static class Response extends ActionResponse implements ToXContentObject {
 
-        private BytesReference preview;
+        private final BytesReference preview;
 
-        public Response() {
+        public Response(StreamInput in) throws IOException {
+            super(in);
+            preview = in.readBytesReference();
         }
 
         public Response(BytesReference preview) {
             this.preview = preview;
         }
 
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
-            preview = in.readBytesReference();
-        }
-
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             out.writeBytesReference(preview);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutCalendarAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutCalendarAction.java
index 6a708cf79d006..927de59b370d9 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutCalendarAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutCalendarAction.java
@@ -9,7 +9,7 @@
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.ActionResponse;
-import org.elasticsearch.action.StreamableResponseActionType;
+import org.elasticsearch.action.ActionType;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -27,17 +27,12 @@
 
 import static org.elasticsearch.action.ValidateActions.addValidationError;
 
-public class PutCalendarAction extends StreamableResponseActionType<PutCalendarAction.Response> {
+public class PutCalendarAction extends ActionType<PutCalendarAction.Response> {
 
     public static final PutCalendarAction INSTANCE = new PutCalendarAction();
     public static final String NAME = "cluster:admin/xpack/ml/calendars/put";
 
     private PutCalendarAction() {
-        super(NAME);
-    }
-
-    @Override
-    public Response newResponse() {
-        return new Response();
+        super(NAME, Response::new);
     }
 
     public static class Request extends ActionRequest implements ToXContentObject {
@@ -60,6 +55,11 @@ public Request() {
         }
 
+        public Request(StreamInput in) throws IOException {
+            super(in);
+            calendar = new Calendar(in);
+        }
+
         public Request(Calendar calendar) {
             this.calendar = ExceptionsHelper.requireNonNull(calendar, "calendar");
         }
@@ -89,12 +89,6 @@ public ActionRequestValidationException validate() {
             return validationException;
         }
 
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
-            calendar = new Calendar(in);
-        }
-
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
@@ -136,20 +130,15 @@ public static class Response extends ActionResponse implements ToXContentObject
 
         private Calendar calendar;
 
-        public Response() {
+        public Response(StreamInput in) throws IOException {
+            super(in);
+            calendar = new Calendar(in);
         }
 
         public Response(Calendar calendar) {
             this.calendar = calendar;
         }
 
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
-            calendar = new Calendar(in);
-
-        }
-
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             calendar.writeTo(out);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDataFrameAnalyticsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDataFrameAnalyticsAction.java
index 2d7f1e60772aa..48c9be2819232 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDataFrameAnalyticsAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDataFrameAnalyticsAction.java
@@ -7,7 +7,7 @@
 
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.ActionResponse;
-import org.elasticsearch.action.StreamableResponseActionType;
+import org.elasticsearch.action.ActionType;
 import org.elasticsearch.action.support.master.AcknowledgedRequest;
 import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
 import org.elasticsearch.client.ElasticsearchClient;
@@ -23,18 +23,13 @@
 import java.io.IOException;
 import java.util.Objects;
 
-public class PutDataFrameAnalyticsAction extends StreamableResponseActionType<PutDataFrameAnalyticsAction.Response> {
+public class PutDataFrameAnalyticsAction extends ActionType<PutDataFrameAnalyticsAction.Response> {
 
     public static final PutDataFrameAnalyticsAction INSTANCE = new PutDataFrameAnalyticsAction();
     public static final String NAME = "cluster:admin/xpack/ml/data_frame/analytics/put";
 
     private PutDataFrameAnalyticsAction() {
-        super(NAME);
-    }
-
-    @Override
-    public Response newResponse() {
-        return new Response();
+        super(NAME, Response::new);
     }
 
     public static class Request extends AcknowledgedRequest<Request> implements ToXContentObject {
@@ -56,14 +51,13 @@ public static Request parseRequest(String id, XContentParser parser) {
 
         public Request() {}
 
-        public Request(DataFrameAnalyticsConfig config) {
-            this.config = config;
+        public Request(StreamInput in) throws IOException {
+            super(in);
+            config = new DataFrameAnalyticsConfig(in);
         }
 
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
-            config = new DataFrameAnalyticsConfig(in);
+        public Request(DataFrameAnalyticsConfig config) {
+            this.config = config;
         }
 
         @Override
@@ -111,9 +105,8 @@ public Response(DataFrameAnalyticsConfig config) {
 
         Response() {}
 
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
+        Response(StreamInput in) throws IOException {
+            super(in);
             config = new DataFrameAnalyticsConfig(in);
         }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedAction.java
index 5b84165cc7c0d..481d6156204c4 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedAction.java
@@ -7,7 +7,7 @@
 
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.ActionResponse;
-import org.elasticsearch.action.StreamableResponseActionType;
+import org.elasticsearch.action.ActionType;
 import org.elasticsearch.action.support.master.AcknowledgedRequest;
 import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
 import org.elasticsearch.client.ElasticsearchClient;
@@ -21,18 +21,13 @@
 import java.io.IOException;
 import java.util.Objects;
 
-public class PutDatafeedAction extends StreamableResponseActionType<PutDatafeedAction.Response> {
+public class PutDatafeedAction extends ActionType<PutDatafeedAction.Response> {
 
     public static final PutDatafeedAction INSTANCE = new PutDatafeedAction();
     public static final String NAME = "cluster:admin/xpack/ml/datafeeds/put";
 
     private PutDatafeedAction() {
-        super(NAME);
-    }
-
-    @Override
-    public Response newResponse() {
-        return new Response();
+        super(NAME, Response::new);
     }
 
     public static class Request extends AcknowledgedRequest<Request> implements ToXContentObject {
@@ -52,6 +47,11 @@ public Request(DatafeedConfig datafeed) {
 
         public Request() {
         }
 
+        public Request(StreamInput in) throws IOException {
+            super(in);
+            datafeed = new DatafeedConfig(in);
+        }
+
         public DatafeedConfig getDatafeed() {
             return datafeed;
         }
@@ -61,12 +61,6 @@ public ActionRequestValidationException validate() {
             return null;
         }
 
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
-            datafeed = new DatafeedConfig(in);
-        }
-
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
@@ -108,19 +102,15 @@ public Response(DatafeedConfig datafeed) {
             this.datafeed = datafeed;
         }
 
-        public Response() {
+        public Response(StreamInput in) throws IOException {
+            super(in);
+            datafeed = new DatafeedConfig(in);
         }
 
         public DatafeedConfig getResponse() {
             return datafeed;
         }
 
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
-            datafeed = new DatafeedConfig(in);
-        }
-
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             datafeed.writeTo(out);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutFilterAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutFilterAction.java
index 4a12ceb946191..ca8e334b555f2 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutFilterAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutFilterAction.java
@@ -9,7 +9,7 @@
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.ActionResponse;
-import org.elasticsearch.action.StreamableResponseActionType;
+import org.elasticsearch.action.ActionType;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -25,18 +25,13 @@
 
 import java.util.Objects;
 
-public class PutFilterAction extends StreamableResponseActionType<PutFilterAction.Response> {
+public class PutFilterAction extends ActionType<PutFilterAction.Response> {
 
     public static final PutFilterAction INSTANCE = new PutFilterAction();
     public static final String NAME = "cluster:admin/xpack/ml/filters/put";
 
     private PutFilterAction() {
-        super(NAME);
-    }
-
-    @Override
-    public Response newResponse() {
-        return new Response();
+        super(NAME, Response::new);
     }
 
     public static class Request extends ActionRequest implements ToXContentObject {
@@ -59,6 +54,11 @@ public Request() {
         }
 
+        public Request(StreamInput in) throws IOException {
+            super(in);
+            filter = new MlFilter(in);
+        }
+
         public Request(MlFilter filter) {
             this.filter = ExceptionsHelper.requireNonNull(filter, "filter");
         }
@@ -72,12 +72,6 @@ public ActionRequestValidationException validate() {
             return null;
         }
 
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
-            filter = new MlFilter(in);
-        }
-
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
@@ -122,14 +116,13 @@ public static class Response extends ActionResponse implements ToXContentObject
 
         Response() {
         }
 
-        public Response(MlFilter filter) {
-            this.filter = filter;
+        Response(StreamInput in) throws IOException {
+            super(in);
+            filter = new MlFilter(in);
         }
 
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
-            filter = new MlFilter(in);
+        public Response(MlFilter filter) {
+            this.filter = filter;
         }
 
         @Override
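The Put* responses above and below each wrap a single domain object that is itself a Writeable, so the response's stream constructor and writeTo simply delegate to the wrapped type. The shape, using a hypothetical ExampleConfig wrapper (the real classes wrap MlFilter, Calendar, DatafeedConfig, Job, and so on):

public static class Response extends ActionResponse {

    // Final is possible now that deserialization happens in a constructor.
    private final ExampleConfig config; // ExampleConfig is a stand-in for any Writeable domain object

    Response(StreamInput in) throws IOException {
        super(in);
        config = new ExampleConfig(in); // the wrapped type reads itself from the stream
    }

    public Response(ExampleConfig config) {
        this.config = config;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        config.writeTo(out); // delegate; nothing else goes on the wire
    }
}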
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java
index 5cd31b3c5552e..ef41dbf4e40e8 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java
@@ -7,7 +7,7 @@
 
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.ActionResponse;
-import org.elasticsearch.action.StreamableResponseActionType;
+import org.elasticsearch.action.ActionType;
 import org.elasticsearch.action.support.master.AcknowledgedRequest;
 import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
 import org.elasticsearch.client.ElasticsearchClient;
@@ -24,18 +24,13 @@
 import java.util.List;
 import java.util.Objects;
 
-public class PutJobAction extends StreamableResponseActionType<PutJobAction.Response> {
+public class PutJobAction extends ActionType<PutJobAction.Response> {
 
     public static final PutJobAction INSTANCE = new PutJobAction();
     public static final String NAME = "cluster:admin/xpack/ml/job/put";
 
     private PutJobAction() {
-        super(NAME);
-    }
-
-    @Override
-    public Response newResponse() {
-        return new Response();
+        super(NAME, Response::new);
     }
 
     public static class Request extends AcknowledgedRequest<Request> implements ToXContentObject {
@@ -76,6 +71,11 @@ public Request(Job.Builder jobBuilder) {
 
         public Request() {
         }
 
+        public Request(StreamInput in) throws IOException {
+            super(in);
+            jobBuilder = new Job.Builder(in);
+        }
+
         public Job.Builder getJobBuilder() {
             return jobBuilder;
         }
@@ -85,12 +85,6 @@ public ActionRequestValidationException validate() {
             return null;
         }
 
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
-            jobBuilder = new Job.Builder(in);
-        }
-
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
@@ -131,25 +125,21 @@ public RequestBuilder(ElasticsearchClient client, PutJobAction action) {
 
     public static class Response extends ActionResponse implements ToXContentObject {
 
-        private Job job;
+        private final Job job;
 
         public Response(Job job) {
             this.job = job;
         }
 
-        public Response() {
+        public Response(StreamInput in) throws IOException {
+            super(in);
+            job = new Job(in);
         }
 
         public Job getResponse() {
             return job;
         }
 
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
-            job = new Job(in);
-        }
-
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             job.writeTo(out);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotAction.java
index 1538832d7fd24..aeabe2102f09d 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotAction.java
@@ -7,7 +7,7 @@
 
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.ActionResponse;
-import org.elasticsearch.action.StreamableResponseActionType;
+import org.elasticsearch.action.ActionType;
 import org.elasticsearch.action.support.master.AcknowledgedRequest;
 import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
 import org.elasticsearch.client.ElasticsearchClient;
@@ -28,18 +28,13 @@
 import java.io.IOException;
 import java.util.Objects;
 
-public class RevertModelSnapshotAction extends StreamableResponseActionType<RevertModelSnapshotAction.Response> {
+public class RevertModelSnapshotAction extends ActionType<RevertModelSnapshotAction.Response> {
 
     public static final RevertModelSnapshotAction INSTANCE = new RevertModelSnapshotAction();
     public static final String NAME = "cluster:admin/xpack/ml/job/model_snapshots/revert";
 
     private RevertModelSnapshotAction() {
-        super(NAME);
-    }
-
-    @Override
-    public Response newResponse() {
-        return new Response();
+        super(NAME, Response::new);
     }
 
     public static class Request extends AcknowledgedRequest<Request> implements ToXContentObject {
@@ -73,6 +68,13 @@ public static Request parseRequest(String jobId, String snapshotId, XContentPars
         public Request() {
         }
 
+        public Request(StreamInput in) throws IOException {
+            super(in);
+            jobId = in.readString();
+            snapshotId = in.readString();
+            deleteInterveningResults = in.readBoolean();
+        }
+
         public Request(String jobId, String snapshotId) {
             this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName());
             this.snapshotId = ExceptionsHelper.requireNonNull(snapshotId, SNAPSHOT_ID.getPreferredName());
@@ -99,14 +101,6 @@ public ActionRequestValidationException validate() {
             return null;
         }
 
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
-            jobId = in.readString();
-            snapshotId = in.readString();
-            deleteInterveningResults = in.readBoolean();
-        }
-
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
@@ -156,10 +150,12 @@ public static class Response extends ActionResponse implements StatusToXContentO
 
         private static final ParseField MODEL = new ParseField("model");
         private ModelSnapshot model;
 
-        public Response() {
-
+        public Response(StreamInput in) throws IOException {
+            super(in);
+            model = new ModelSnapshot(in);
         }
 
+
         public Response(ModelSnapshot modelSnapshot) {
             model = modelSnapshot;
         }
@@ -168,12 +164,6 @@ public ModelSnapshot getModel() {
             return model;
         }
 
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
-            model = new ModelSnapshot(in);
-        }
-
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             model.writeTo(out);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/SetUpgradeModeAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/SetUpgradeModeAction.java
index 9592743554fe3..54726587187be 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/SetUpgradeModeAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/SetUpgradeModeAction.java
@@ -5,16 +5,15 @@
  */
 package org.elasticsearch.xpack.core.ml.action;
 
-import org.elasticsearch.action.ActionType;
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.ActionType;
 import org.elasticsearch.action.support.master.AcknowledgedRequest;
 import org.elasticsearch.action.support.master.AcknowledgedResponse;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.ConstructingObjectParser;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -28,12 +27,7 @@ public class SetUpgradeModeAction extends ActionType<AcknowledgedResponse> {
 
     public static final SetUpgradeModeAction INSTANCE = new SetUpgradeModeAction();
     public static final String NAME = "cluster:admin/xpack/ml/upgrade_mode";
 
     private SetUpgradeModeAction() {
-        super(NAME);
-    }
-
-    @Override
-    public Writeable.Reader<AcknowledgedResponse> getResponseReader() {
-        return AcknowledgedResponse::new;
+        super(NAME, AcknowledgedResponse::new);
     }
 
     public static class Request extends AcknowledgedRequest<Request> implements ToXContentObject {
@@ -53,7 +47,8 @@ public Request(boolean enabled) {
     }
 
     public Request(StreamInput in) throws IOException {
-        readFrom(in);
+        super(in);
+        this.enabled = in.readBoolean();
     }
 
     public Request() {
@@ -68,12 +63,6 @@ public ActionRequestValidationException validate() {
         return null;
     }
 
-    @Override
-    public void readFrom(StreamInput in) throws IOException {
-        super.readFrom(in);
-        this.enabled = in.readBoolean();
-    }
-
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDataFrameAnalyticsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDataFrameAnalyticsAction.java
index b0a45c3742188..6cc64de34017f 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDataFrameAnalyticsAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDataFrameAnalyticsAction.java
@@ -17,7 +17,6 @@
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.ConstructingObjectParser;
 import org.elasticsearch.common.xcontent.ObjectParser;
@@ -40,12 +39,7 @@ public class StartDataFrameAnalyticsAction extends ActionType<AcknowledgedResponse> {
 
     public static final StartDataFrameAnalyticsAction INSTANCE = new StartDataFrameAnalyticsAction();
     public static final String NAME = "cluster:admin/xpack/ml/data_frame/analytics/start";
 
     private StartDataFrameAnalyticsAction() {
-        super(NAME);
-    }
-
-    @Override
-    public Writeable.Reader<AcknowledgedResponse> getResponseReader() {
-        return AcknowledgedResponse::new;
+        super(NAME, AcknowledgedResponse::new);
     }
 
     public static class Request extends MasterNodeRequest<Request> implements ToXContentObject {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java
index 9344985686965..db9054a973fb4 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java
@@ -7,9 +7,9 @@
 
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.Version;
-import org.elasticsearch.action.ActionType;
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.ActionType;
 import org.elasticsearch.action.ValidateActions;
 import org.elasticsearch.action.support.master.AcknowledgedResponse;
 import org.elasticsearch.action.support.master.MasterNodeRequest;
@@ -17,7 +17,6 @@
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.time.DateMathParser;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.ObjectParser;
@@ -49,12 +48,7 @@ public class StartDatafeedAction extends ActionType<AcknowledgedResponse> {
 
     public static final StartDatafeedAction INSTANCE = new StartDatafeedAction();
     public static final String NAME = "cluster:admin/xpack/ml/datafeed/start";
 
     private StartDatafeedAction() {
-        super(NAME);
-    }
-
-    @Override
-    public Writeable.Reader<AcknowledgedResponse> getResponseReader() {
-        return AcknowledgedResponse::new;
+        super(NAME, AcknowledgedResponse::new);
     }
 
     public static class Request extends MasterNodeRequest<Request> implements ToXContentObject {
@@ -86,7 +80,8 @@ public Request(DatafeedParams params) {
     }
 
     public Request(StreamInput in) throws IOException {
-        readFrom(in);
+        super(in);
+        params = new DatafeedParams(in);
     }
 
     public Request() {
@@ -107,12 +102,6 @@ public ActionRequestValidationException validate() {
         return e;
     }
 
-    @Override
-    public void readFrom(StreamInput in) throws IOException {
-        super.readFrom(in);
-        params = new DatafeedParams(in);
-    }
-
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDataFrameAnalyticsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDataFrameAnalyticsAction.java
index ac0ea12618fee..cd314f68eb0f4 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDataFrameAnalyticsAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDataFrameAnalyticsAction.java
@@ -40,12 +40,7 @@ public class StopDataFrameAnalyticsAction extends ActionType<StopDataFrameAnalyticsAction.Response> {
 
     public static final StopDataFrameAnalyticsAction INSTANCE = new StopDataFrameAnalyticsAction();
     public static final String NAME = "cluster:admin/xpack/ml/data_frame/analytics/stop";
 
     private StopDataFrameAnalyticsAction() {
-        super(NAME);
-    }
-
-    @Override
-    public Writeable.Reader<Response> getResponseReader() {
-        return Response::new;
+        super(NAME, StopDataFrameAnalyticsAction.Response::new);
     }
 
     public static class Request extends BaseTasksRequest<Request> implements ToXContentObject {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDatafeedAction.java
index 72e6b63b32aca..1c21fde4382df 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDatafeedAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDatafeedAction.java
@@ -35,12 +35,7 @@ public class StopDatafeedAction extends ActionType<StopDatafeedAction.Response> {
 
     public static final StopDatafeedAction INSTANCE = new StopDatafeedAction();
     public static final String NAME = "cluster:admin/xpack/ml/datafeed/stop";
     public static final TimeValue DEFAULT_TIMEOUT = TimeValue.timeValueMinutes(5);
 
     private StopDatafeedAction() {
-        super(NAME);
-    }
-
-    @Override
-    public Writeable.Reader<Response> getResponseReader() {
-        return Response::new;
+        super(NAME, StopDatafeedAction.Response::new);
     }
 
     public static class Request extends BaseTasksRequest<Request> implements ToXContentObject {
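The Update* actions that follow define no response types of their own; each returns the corresponding Put* action's response, so the migration simply points the reader at the borrowed class, as in the hunk below:

private UpdateCalendarJobAction() {
    super(NAME, PutCalendarAction.Response::new); // borrow PutCalendarAction's response reader
}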
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateCalendarJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateCalendarJobAction.java
index 02c3f7dad40da..15de7cd7d3931 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateCalendarJobAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateCalendarJobAction.java
@@ -8,7 +8,7 @@
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionRequestValidationException;
-import org.elasticsearch.action.StreamableResponseActionType;
+import org.elasticsearch.action.ActionType;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -18,17 +18,12 @@
 import java.io.IOException;
 import java.util.Objects;
 
-public class UpdateCalendarJobAction extends StreamableResponseActionType<PutCalendarAction.Response> {
+public class UpdateCalendarJobAction extends ActionType<PutCalendarAction.Response> {
 
     public static final UpdateCalendarJobAction INSTANCE = new UpdateCalendarJobAction();
     public static final String NAME = "cluster:admin/xpack/ml/calendars/jobs/update";
 
     private UpdateCalendarJobAction() {
-        super(NAME);
-    }
-
-    @Override
-    public PutCalendarAction.Response newResponse() {
-        return new PutCalendarAction.Response();
+        super(NAME, PutCalendarAction.Response::new);
     }
 
     public static class Request extends ActionRequest {
@@ -40,6 +35,13 @@ public static class Request extends ActionRequest {
 
         public Request() {
         }
 
+        public Request(StreamInput in) throws IOException {
+            super(in);
+            calendarId = in.readString();
+            jobIdsToAddExpression = in.readOptionalString();
+            jobIdsToRemoveExpression = in.readOptionalString();
+        }
+
         /**
          * Job id expressions may be a single job, job group or comma separated
         * list of job Ids or groups
@@ -67,14 +69,6 @@ public ActionRequestValidationException validate() {
             return null;
         }
 
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
-            calendarId = in.readString();
-            jobIdsToAddExpression = in.readOptionalString();
-            jobIdsToRemoveExpression = in.readOptionalString();
-        }
-
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateDatafeedAction.java
index 93c96a8a9cfc6..5d82a21662648 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateDatafeedAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateDatafeedAction.java
@@ -6,7 +6,7 @@
 package org.elasticsearch.xpack.core.ml.action;
 
 import org.elasticsearch.action.ActionRequestValidationException;
-import org.elasticsearch.action.StreamableResponseActionType;
+import org.elasticsearch.action.ActionType;
 import org.elasticsearch.action.support.master.AcknowledgedRequest;
 import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
 import org.elasticsearch.client.ElasticsearchClient;
@@ -20,18 +20,13 @@
 import java.io.IOException;
 import java.util.Objects;
 
-public class UpdateDatafeedAction extends StreamableResponseActionType<PutDatafeedAction.Response> {
+public class UpdateDatafeedAction extends ActionType<PutDatafeedAction.Response> {
 
     public static final UpdateDatafeedAction INSTANCE = new UpdateDatafeedAction();
     public static final String NAME = "cluster:admin/xpack/ml/datafeeds/update";
 
     private UpdateDatafeedAction() {
-        super(NAME);
-    }
-
-    @Override
-    public PutDatafeedAction.Response newResponse() {
-        return new PutDatafeedAction.Response();
+        super(NAME, PutDatafeedAction.Response::new);
     }
 
     public static class Request extends AcknowledgedRequest<Request> implements ToXContentObject {
@@ -51,6 +46,11 @@ public Request(DatafeedUpdate update) {
 
         public Request() {
         }
 
+        public Request(StreamInput in) throws IOException {
+            super(in);
+            update = new DatafeedUpdate(in);
+        }
+
         public DatafeedUpdate getUpdate() {
             return update;
         }
@@ -60,12 +60,6 @@ public ActionRequestValidationException validate() {
             return null;
         }
 
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
-            update = new DatafeedUpdate(in);
-        }
-
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateFilterAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateFilterAction.java
index 95696ec5ed80c..5028d4d3c9625 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateFilterAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateFilterAction.java
@@ -8,7 +8,7 @@
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionRequestValidationException;
-import org.elasticsearch.action.StreamableResponseActionType;
+import org.elasticsearch.action.ActionType;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.ParseField;
@@ -32,18 +32,13 @@
 
 import java.util.TreeSet;
 
-public class UpdateFilterAction extends StreamableResponseActionType<PutFilterAction.Response> {
+public class UpdateFilterAction extends ActionType<PutFilterAction.Response> {
 
     public static final UpdateFilterAction INSTANCE = new UpdateFilterAction();
     public static final String NAME = "cluster:admin/xpack/ml/filters/update";
 
     private UpdateFilterAction() {
-        super(NAME);
-    }
-
-    @Override
-    public PutFilterAction.Response newResponse() {
-        return new PutFilterAction.Response();
+        super(NAME, PutFilterAction.Response::new);
     }
 
     public static class Request extends ActionRequest implements ToXContentObject {
@@ -81,6 +76,14 @@ public static Request parseRequest(String filterId, XContentParser parser) {
         public Request() {
         }
 
+        public Request(StreamInput in) throws IOException {
+            super(in);
+            filterId = in.readString();
+            description = in.readOptionalString();
+            addItems = new TreeSet<>(Arrays.asList(in.readStringArray()));
+            removeItems = new TreeSet<>(Arrays.asList(in.readStringArray()));
+        }
+
         public Request(String filterId) {
             this.filterId = ExceptionsHelper.requireNonNull(filterId, MlFilter.ID.getPreferredName());
         }
@@ -122,15 +125,6 @@ public ActionRequestValidationException validate() {
             return null;
         }
 
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
-            filterId = in.readString();
-            description = in.readOptionalString();
-            addItems = new TreeSet<>(Arrays.asList(in.readStringArray()));
-            removeItems = new TreeSet<>(Arrays.asList(in.readStringArray()));
-        }
-
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java
index 80dcb4e6f7091..a80e29ad34436 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java
@@ -6,7 +6,7 @@
 package org.elasticsearch.xpack.core.ml.action;
 
 import org.elasticsearch.action.ActionRequestValidationException;
-import org.elasticsearch.action.StreamableResponseActionType;
+import org.elasticsearch.action.ActionType;
 import org.elasticsearch.action.support.master.AcknowledgedRequest;
 import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
 import org.elasticsearch.client.ElasticsearchClient;
@@ -23,17 +23,12 @@
 import java.io.IOException;
 import java.util.Objects;
 
-public class UpdateJobAction extends StreamableResponseActionType<PutJobAction.Response> {
+public class UpdateJobAction extends ActionType<PutJobAction.Response> {
 
     public static final UpdateJobAction INSTANCE = new UpdateJobAction();
     public static final String NAME = "cluster:admin/xpack/ml/job/update";
 
     private UpdateJobAction() {
-        super(NAME);
-    }
-
-    @Override
-    public PutJobAction.Response newResponse() {
-        return new PutJobAction.Response();
+        super(NAME, PutJobAction.Response::new);
     }
 
     public static class Request extends AcknowledgedRequest<Request> implements ToXContentObject {
@@ -65,6 +60,13 @@ private Request(String jobId, JobUpdate update, boolean isInternal) {
 
         public Request() {
         }
 
+        public Request(StreamInput in) throws IOException {
+            super(in);
+            jobId = in.readString();
+            update = new JobUpdate(in);
+            isInternal = in.readBoolean();
+        }
+
         public static Request internal(String jobId, JobUpdate update) {
             return new Request(jobId, update, true);
         }
@@ -86,14 +88,6 @@ public ActionRequestValidationException validate() {
             return null;
         }
 
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
-            jobId = in.readString();
-            update = new JobUpdate(in);
-            isInternal = in.readBoolean();
-        }
-
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateModelSnapshotAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateModelSnapshotAction.java
index e984d49fff65d..04ac3c8deca6d 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateModelSnapshotAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateModelSnapshotAction.java
@@ -9,7 +9,7 @@
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.ActionResponse;
-import org.elasticsearch.action.StreamableResponseActionType;
+import org.elasticsearch.action.ActionType;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.Strings;
@@ -29,18 +29,13 @@
 import java.io.IOException;
 import java.util.Objects;
 
-public class UpdateModelSnapshotAction extends StreamableResponseActionType<UpdateModelSnapshotAction.Response> {
+public class UpdateModelSnapshotAction extends ActionType<UpdateModelSnapshotAction.Response> {
 
     public static final UpdateModelSnapshotAction INSTANCE = new UpdateModelSnapshotAction();
     public static final String NAME = "cluster:admin/xpack/ml/job/model_snapshots/update";
 
     private UpdateModelSnapshotAction() {
-        super(NAME);
-    }
-
-    @Override
-    public UpdateModelSnapshotAction.Response newResponse() {
-        return new Response();
+        super(NAME, Response::new);
     }
 
     public static class Request extends ActionRequest implements ToXContentObject {
@@ -73,6 +68,14 @@ public static Request parseRequest(String jobId, String snapshotId, XContentPars
         public Request() {
         }
 
+        public Request(StreamInput in) throws IOException {
+            super(in);
+            jobId = in.readString();
+            snapshotId = in.readString();
+            description = in.readOptionalString();
+            retain = in.readOptionalBoolean();
+        }
+
         public Request(String jobId, String snapshotId) {
             this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName());
             this.snapshotId = ExceptionsHelper.requireNonNull(snapshotId, ModelSnapshotField.SNAPSHOT_ID.getPreferredName());
@@ -107,15 +110,6 @@ public ActionRequestValidationException validate() {
             return null;
         }
 
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
-            jobId = in.readString();
-            snapshotId = in.readString();
-            description = in.readOptionalString();
-            retain = in.readOptionalBoolean();
-        }
-
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
@@ -166,10 +160,11 @@ public static class Response extends ActionResponse implements StatusToXContentO
 
         private static final ParseField ACKNOWLEDGED = new ParseField("acknowledged");
         private static final ParseField MODEL = new ParseField("model");
 
-        private ModelSnapshot model;
-
-        public Response() {
+        private final ModelSnapshot model;
 
+        public Response(StreamInput in) throws IOException {
+            super(in);
+            model = new ModelSnapshot(in);
         }
 
         public Response(ModelSnapshot modelSnapshot) {
@@ -180,12 +175,6 @@ public ModelSnapshot getModel() {
             return model;
         }
 
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
-            model = new ModelSnapshot(in);
-        }
-
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             model.writeTo(out);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateProcessAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateProcessAction.java
index 331344fdace64..2c198f1da3852 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateProcessAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateProcessAction.java
@@ -29,12 +29,7 @@ public class UpdateProcessAction extends ActionType<UpdateProcessAction.Response> {
 
     public static final UpdateProcessAction INSTANCE = new UpdateProcessAction();
     public static final String NAME = "cluster:internal/xpack/ml/job/update/process";
 
     private UpdateProcessAction() {
-        super(NAME);
-    }
-
-    @Override
-    public Writeable.Reader<Response> getResponseReader() {
-        return Response::new;
+        super(NAME, UpdateProcessAction.Response::new);
     }
 
     static class RequestBuilder extends ActionRequestBuilder<Request, Response> {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateDetectorAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateDetectorAction.java
index 6a33fa1fdbd6d..ffe1ab9f77a4f 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateDetectorAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateDetectorAction.java
@@ -5,15 +5,14 @@
  */
 package org.elasticsearch.xpack.core.ml.action;
 
-import org.elasticsearch.action.ActionType;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.ActionType;
 import org.elasticsearch.action.support.master.AcknowledgedResponse;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
@@ -28,12 +27,7 @@ public class ValidateDetectorAction extends ActionType<AcknowledgedResponse> {
 
     public static final ValidateDetectorAction INSTANCE = new ValidateDetectorAction();
     public static final String NAME = "cluster:admin/xpack/ml/job/validate/detector";
 
     protected ValidateDetectorAction() {
-        super(NAME);
-    }
-
-    @Override
-    public Writeable.Reader<AcknowledgedResponse> getResponseReader() {
-        return AcknowledgedResponse::new;
+        super(NAME, AcknowledgedResponse::new);
     }
 
     public static class RequestBuilder extends ActionRequestBuilder<Request, AcknowledgedResponse> {
@@ -61,6 +55,11 @@ public Request(Detector detector) {
             this.detector = detector;
         }
 
+        public Request(StreamInput in) throws IOException {
+            super(in);
+            detector = new Detector(in);
+        }
+
         public Detector getDetector() {
             return detector;
         }
@@ -76,12 +75,6 @@ public void writeTo(StreamOutput out) throws IOException {
             detector.writeTo(out);
         }
 
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
-            detector = new Detector(in);
-        }
-
         @Override
         public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
             detector.toXContent(builder, params);
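A quick way to exercise the new stream constructors is a wire round-trip: write the object out, rebuild it through the StreamInput constructor, and compare. This hand-rolled sketch is roughly what the AbstractWireSerializingTestCase base class automates; ExampleAction.Response, the original variable, and its equals implementation are assumed for illustration:

import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;

// Round-trip a response through an in-memory buffer.
try (BytesStreamOutput out = new BytesStreamOutput()) {
    original.writeTo(out);
    try (StreamInput in = out.bytes().streamInput()) {
        ExampleAction.Response copy = new ExampleAction.Response(in);
        assert original.equals(copy) : "round trip should preserve equality";
    }
}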
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigAction.java index f6abae83eef2d..4830ea711e0e0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigAction.java @@ -5,15 +5,14 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.messages.Messages; @@ -29,12 +28,7 @@ public class ValidateJobConfigAction extends ActionType { public static final String NAME = "cluster:admin/xpack/ml/job/validate"; protected ValidateJobConfigAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return AcknowledgedResponse::new; + super(NAME, AcknowledgedResponse::new); } public static class RequestBuilder extends ActionRequestBuilder { @@ -78,6 +72,11 @@ public Request(Job job) { this.job = job; } + public Request(StreamInput in) throws IOException { + super(in); + job = new Job(in); + } + public Job getJob() { return job; } @@ -93,12 +92,6 @@ public void writeTo(StreamOutput out) throws IOException { job.writeTo(out); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - job = new Job(in); - } - @Override public int hashCode() { return Objects.hash(job); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedTimingStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedTimingStats.java index 443cb84ecdc0a..775dc9931bc86 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedTimingStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedTimingStats.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; import java.io.IOException; import java.util.Objects; @@ -26,7 +27,9 @@ public class DatafeedTimingStats implements ToXContentObject, Writeable { public static final ParseField JOB_ID = new ParseField("job_id"); public static final ParseField SEARCH_COUNT = new ParseField("search_count"); + public static final ParseField BUCKET_COUNT = new ParseField("bucket_count"); public static final ParseField TOTAL_SEARCH_TIME_MS = new ParseField("total_search_time_ms"); + public static final ParseField AVG_SEARCH_TIME_PER_BUCKET_MS = new ParseField("average_search_time_per_bucket_ms"); public static final ParseField TYPE = new ParseField("datafeed_timing_stats"); @@ -40,11 +43,14 @@ private static 
ConstructingObjectParser createParser( args -> { String jobId = (String) args[0]; Long searchCount = (Long) args[1]; - Double totalSearchTimeMs = (Double) args[2]; - return new DatafeedTimingStats(jobId, getOrDefault(searchCount, 0L), getOrDefault(totalSearchTimeMs, 0.0)); + Long bucketCount = (Long) args[2]; + Double totalSearchTimeMs = (Double) args[3]; + return new DatafeedTimingStats( + jobId, getOrDefault(searchCount, 0L), getOrDefault(bucketCount, 0L), getOrDefault(totalSearchTimeMs, 0.0)); }); parser.declareString(constructorArg(), JOB_ID); parser.declareLong(optionalConstructorArg(), SEARCH_COUNT); + parser.declareLong(optionalConstructorArg(), BUCKET_COUNT); parser.declareDouble(optionalConstructorArg(), TOTAL_SEARCH_TIME_MS); return parser; } @@ -55,26 +61,29 @@ public static String documentId(String jobId) { private final String jobId; private long searchCount; + private long bucketCount; private double totalSearchTimeMs; - public DatafeedTimingStats(String jobId, long searchCount, double totalSearchTimeMs) { + public DatafeedTimingStats(String jobId, long searchCount, long bucketCount, double totalSearchTimeMs) { this.jobId = Objects.requireNonNull(jobId); this.searchCount = searchCount; + this.bucketCount = bucketCount; this.totalSearchTimeMs = totalSearchTimeMs; } public DatafeedTimingStats(String jobId) { - this(jobId, 0, 0); + this(jobId, 0, 0, 0.0); } public DatafeedTimingStats(StreamInput in) throws IOException { jobId = in.readString(); searchCount = in.readLong(); + bucketCount = in.readLong(); totalSearchTimeMs = in.readDouble(); } public DatafeedTimingStats(DatafeedTimingStats other) { - this(other.jobId, other.searchCount, other.totalSearchTimeMs); + this(other.jobId, other.searchCount, other.bucketCount, other.totalSearchTimeMs); } public String getJobId() { @@ -85,19 +94,34 @@ public long getSearchCount() { return searchCount; } + public long getBucketCount() { + return bucketCount; + } + public double getTotalSearchTimeMs() { return totalSearchTimeMs; } + public Double getAvgSearchTimePerBucketMs() { + return bucketCount > 0 + ? 
totalSearchTimeMs / bucketCount + : null; + } + public void incrementTotalSearchTimeMs(double searchTimeMs) { this.searchCount++; this.totalSearchTimeMs += searchTimeMs; } + public void setBucketCount(long bucketCount) { + this.bucketCount = bucketCount; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(jobId); out.writeLong(searchCount); + out.writeLong(bucketCount); out.writeDouble(totalSearchTimeMs); } @@ -106,7 +130,14 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par builder.startObject(); builder.field(JOB_ID.getPreferredName(), jobId); builder.field(SEARCH_COUNT.getPreferredName(), searchCount); + builder.field(BUCKET_COUNT.getPreferredName(), bucketCount); builder.field(TOTAL_SEARCH_TIME_MS.getPreferredName(), totalSearchTimeMs); + if (params.paramAsBoolean(ToXContentParams.INCLUDE_CALCULATED_FIELDS, false)) { + Double avgSearchTimePerBucket = getAvgSearchTimePerBucketMs(); + if (avgSearchTimePerBucket != null) { + builder.field(AVG_SEARCH_TIME_PER_BUCKET_MS.getPreferredName(), getAvgSearchTimePerBucketMs()); + } + } builder.endObject(); return builder; } @@ -123,12 +154,13 @@ public boolean equals(Object obj) { DatafeedTimingStats other = (DatafeedTimingStats) obj; return Objects.equals(this.jobId, other.jobId) && this.searchCount == other.searchCount + && this.bucketCount == other.bucketCount && this.totalSearchTimeMs == other.totalSearchTimeMs; } @Override public int hashCode() { - return Objects.hash(jobId, searchCount, totalSearchTimeMs); + return Objects.hash(jobId, searchCount, bucketCount, totalSearchTimeMs); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/MlEvaluationNamedXContentProvider.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/MlEvaluationNamedXContentProvider.java index f4a6dba88e3b1..a2aa8e74918ac 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/MlEvaluationNamedXContentProvider.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/MlEvaluationNamedXContentProvider.java @@ -8,6 +8,10 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.plugins.spi.NamedXContentProvider; +import org.elasticsearch.xpack.core.ml.dataframe.evaluation.regression.MeanSquaredError; +import org.elasticsearch.xpack.core.ml.dataframe.evaluation.regression.RSquared; +import org.elasticsearch.xpack.core.ml.dataframe.evaluation.regression.Regression; +import org.elasticsearch.xpack.core.ml.dataframe.evaluation.regression.RegressionMetric; import org.elasticsearch.xpack.core.ml.dataframe.evaluation.softclassification.AucRoc; import org.elasticsearch.xpack.core.ml.dataframe.evaluation.softclassification.BinarySoftClassification; import org.elasticsearch.xpack.core.ml.dataframe.evaluation.softclassification.ConfusionMatrix; @@ -28,6 +32,7 @@ public List getNamedXContentParsers() { // Evaluations namedXContent.add(new NamedXContentRegistry.Entry(Evaluation.class, BinarySoftClassification.NAME, BinarySoftClassification::fromXContent)); + namedXContent.add(new NamedXContentRegistry.Entry(Evaluation.class, Regression.NAME, Regression::fromXContent)); // Soft classification metrics namedXContent.add(new NamedXContentRegistry.Entry(SoftClassificationMetric.class, AucRoc.NAME, AucRoc::fromXContent)); @@ -36,6 +41,10 @@ public 
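[Editor's note: a sketch of what the new bucket_count plumbing buys (job id and numbers are made up). The average search time per bucket is derived rather than stored, and it is only written out when the INCLUDE_CALCULATED_FIELDS param is set, so persisted documents keep their existing shape. Fragment, assumed to sit inside a method:

// 10 searches took 600ms in total and the job has produced 4 buckets.
DatafeedTimingStats stats = new DatafeedTimingStats("my-job", 10, 4, 600.0);

// average_search_time_per_bucket_ms = 600.0 / 4 = 150.0; null when bucket_count == 0,
// which is why toXContent() only emits the field when it is non-null.
Double avgPerBucket = stats.getAvgSearchTimePerBucketMs();

// The derived field appears only under the calculated-fields param:
ToXContent.Params params = new ToXContent.MapParams(
        Collections.singletonMap(ToXContentParams.INCLUDE_CALCULATED_FIELDS, "true"));
XContentBuilder builder = stats.toXContent(XContentFactory.jsonBuilder(), params);
]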
List getNamedXContentParsers() { namedXContent.add(new NamedXContentRegistry.Entry(SoftClassificationMetric.class, ConfusionMatrix.NAME, ConfusionMatrix::fromXContent)); + // Regression metrics + namedXContent.add(new NamedXContentRegistry.Entry(RegressionMetric.class, MeanSquaredError.NAME, MeanSquaredError::fromXContent)); + namedXContent.add(new NamedXContentRegistry.Entry(RegressionMetric.class, RSquared.NAME, RSquared::fromXContent)); + return namedXContent; } @@ -45,6 +54,7 @@ public List getNamedWriteables() { // Evaluations namedWriteables.add(new NamedWriteableRegistry.Entry(Evaluation.class, BinarySoftClassification.NAME.getPreferredName(), BinarySoftClassification::new)); + namedWriteables.add(new NamedWriteableRegistry.Entry(Evaluation.class, Regression.NAME.getPreferredName(), Regression::new)); // Evaluation Metrics namedWriteables.add(new NamedWriteableRegistry.Entry(SoftClassificationMetric.class, AucRoc.NAME.getPreferredName(), @@ -55,6 +65,12 @@ public List getNamedWriteables() { Recall::new)); namedWriteables.add(new NamedWriteableRegistry.Entry(SoftClassificationMetric.class, ConfusionMatrix.NAME.getPreferredName(), ConfusionMatrix::new)); + namedWriteables.add(new NamedWriteableRegistry.Entry(RegressionMetric.class, + MeanSquaredError.NAME.getPreferredName(), + MeanSquaredError::new)); + namedWriteables.add(new NamedWriteableRegistry.Entry(RegressionMetric.class, + RSquared.NAME.getPreferredName(), + RSquared::new)); // Evaluation Metrics Results namedWriteables.add(new NamedWriteableRegistry.Entry(EvaluationMetricResult.class, AucRoc.NAME.getPreferredName(), @@ -63,6 +79,12 @@ public List getNamedWriteables() { ScoreByThresholdResult::new)); namedWriteables.add(new NamedWriteableRegistry.Entry(EvaluationMetricResult.class, ConfusionMatrix.NAME.getPreferredName(), ConfusionMatrix.Result::new)); + namedWriteables.add(new NamedWriteableRegistry.Entry(EvaluationMetricResult.class, + MeanSquaredError.NAME.getPreferredName(), + MeanSquaredError.Result::new)); + namedWriteables.add(new NamedWriteableRegistry.Entry(EvaluationMetricResult.class, + RSquared.NAME.getPreferredName(), + RSquared.Result::new)); return namedWriteables; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/MeanSquaredError.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/MeanSquaredError.java new file mode 100644 index 0000000000000..8dd922b6ac26e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/MeanSquaredError.java @@ -0,0 +1,141 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.dataframe.evaluation.regression; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.script.Script; +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.AggregationBuilders; +import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation; +import org.elasticsearch.xpack.core.ml.dataframe.evaluation.EvaluationMetricResult; + +import java.io.IOException; +import java.text.MessageFormat; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.Objects; + +/** + * Calculates the mean squared error between two known numerical fields. + * + * equation: mse = 1/n * Σ(y - y´)^2 + */ +public class MeanSquaredError implements RegressionMetric { + + public static final ParseField NAME = new ParseField("mean_squared_error"); + + private static final String PAINLESS_TEMPLATE = "def diff = doc[''{0}''].value - doc[''{1}''].value;return diff * diff;"; + private static final String AGG_NAME = "regression_" + NAME.getPreferredName(); + + private static String buildScript(Object...args) { + return new MessageFormat(PAINLESS_TEMPLATE, Locale.ROOT).format(args); + } + + private static final ObjectParser PARSER = + new ObjectParser<>("mean_squared_error", true, MeanSquaredError::new); + + public static MeanSquaredError fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public MeanSquaredError(StreamInput in) { + + } + + public MeanSquaredError() { + + } + + @Override + public String getMetricName() { + return NAME.getPreferredName(); + } + + @Override + public List aggs(String actualField, String predictedField) { + return Collections.singletonList(AggregationBuilders.avg(AGG_NAME).script(new Script(buildScript(actualField, predictedField)))); + } + + @Override + public EvaluationMetricResult evaluate(Aggregations aggs) { + NumericMetricsAggregation.SingleValue value = aggs.get(AGG_NAME); + return value == null ? 
null : new Result(value.value()); + } + + @Override + public String getWriteableName() { + return NAME.getPreferredName(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + return true; + } + + @Override + public int hashCode() { + // create static hash code from name as there are currently no unique fields per class instance + return Objects.hashCode(NAME.getPreferredName()); + } + + public static class Result implements EvaluationMetricResult { + + private static final String ERROR = "error"; + private final double error; + + public Result(double error) { + this.error = error; + } + + public Result(StreamInput in) throws IOException { + this.error = in.readDouble(); + } + + @Override + public String getWriteableName() { + return NAME.getPreferredName(); + } + + @Override + public String getName() { + return NAME.getPreferredName(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeDouble(error); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(ERROR, error); + builder.endObject(); + return builder; + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/RSquared.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/RSquared.java new file mode 100644 index 0000000000000..871f166733f4c --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/RSquared.java @@ -0,0 +1,152 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.dataframe.evaluation.regression; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.script.Script; +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.AggregationBuilders; +import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.aggregations.metrics.ExtendedStats; +import org.elasticsearch.search.aggregations.metrics.ExtendedStatsAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation; +import org.elasticsearch.xpack.core.ml.dataframe.evaluation.EvaluationMetricResult; + +import java.io.IOException; +import java.text.MessageFormat; +import java.util.Arrays; +import java.util.List; +import java.util.Locale; +import java.util.Objects; + +/** + * Calculates R-Squared between two known numerical fields. 
+ * + * equation: R-Squared = 1 - SSres/SStot + * such that, + * SSres = Σ(y - y´)^2, The residual sum of squares + * SStot = Σ(y - y_mean)^2, The total sum of squares + */ +public class RSquared implements RegressionMetric { + + public static final ParseField NAME = new ParseField("r_squared"); + + private static final String PAINLESS_TEMPLATE = "def diff = doc[''{0}''].value - doc[''{1}''].value;return diff * diff;"; + private static final String SS_RES = "residual_sum_of_squares"; + + private static String buildScript(Object... args) { + return new MessageFormat(PAINLESS_TEMPLATE, Locale.ROOT).format(args); + } + + private static final ObjectParser PARSER = + new ObjectParser<>("r_squared", true, RSquared::new); + + public static RSquared fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public RSquared(StreamInput in) { + + } + + public RSquared() { + + } + + @Override + public String getMetricName() { + return NAME.getPreferredName(); + } + + @Override + public List aggs(String actualField, String predictedField) { + return Arrays.asList( + AggregationBuilders.sum(SS_RES).script(new Script(buildScript(actualField, predictedField))), + AggregationBuilders.extendedStats(ExtendedStatsAggregationBuilder.NAME + "_actual").field(actualField)); + } + + @Override + public EvaluationMetricResult evaluate(Aggregations aggs) { + NumericMetricsAggregation.SingleValue residualSumOfSquares = aggs.get(SS_RES); + ExtendedStats extendedStats = aggs.get(ExtendedStatsAggregationBuilder.NAME + "_actual"); + // extendedStats.getVariance() is the statistical sumOfSquares divided by count + return residualSumOfSquares == null || extendedStats == null || extendedStats.getCount() == 0 ? + null : + new Result(1 - (residualSumOfSquares.value() / (extendedStats.getVariance() * extendedStats.getCount()))); + } + + @Override + public String getWriteableName() { + return NAME.getPreferredName(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + return true; + } + + @Override + public int hashCode() { + // create static hash code from name as there are currently no unique fields per class instance + return Objects.hashCode(NAME.getPreferredName()); + } + + public static class Result implements EvaluationMetricResult { + + private static final String VALUE = "value"; + private final double value; + + public Result(double value) { + this.value = value; + } + + public Result(StreamInput in) throws IOException { + this.value = in.readDouble(); + } + + @Override + public String getWriteableName() { + return NAME.getPreferredName(); + } + + @Override + public String getName() { + return NAME.getPreferredName(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeDouble(value); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(VALUE, value); + builder.endObject(); + return builder; + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/Regression.java 
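[Editor's note: two details of the metrics above are worth unpacking. First, the doubled quotes in PAINLESS_TEMPLATE are MessageFormat escapes — each '' renders as a single quote, so the template expands to e.g. doc['price'].value. Second, RSquared.evaluate() reconstructs the total sum of squares from the extended_stats aggregation, since population variance times count equals Σ(y - ȳ)². A worked example with made-up values:

// actual    = [3.0, 5.0, 7.0]   mean ȳ = 5.0
// predicted = [2.0, 5.0, 8.0]
//
// SSres = Σ(y - y')² = 1 + 0 + 1 = 2.0   (the sum agg over the painless script)
// SStot = Σ(y - ȳ)²  = 4 + 0 + 4 = 8.0   (= variance * count = (8/3) * 3)
//
// R² = 1 - SSres/SStot = 1 - 2.0/8.0 = 0.75
double residualSumOfSquares = 2.0;
double variance = 8.0 / 3.0;  // ExtendedStats.getVariance() is the population variance
long count = 3;
double rSquared = 1 - (residualSumOfSquares / (variance * count));  // 0.75
]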
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/Regression.java new file mode 100644 index 0000000000000..e3869dce2ee51 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/Regression.java @@ -0,0 +1,172 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.dataframe.evaluation.regression; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.dataframe.evaluation.Evaluation; +import org.elasticsearch.xpack.core.ml.dataframe.evaluation.EvaluationMetricResult; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.Objects; + +/** + * Evaluation of regression results. 
+ */ +public class Regression implements Evaluation { + + public static final ParseField NAME = new ParseField("regression"); + + private static final ParseField ACTUAL_FIELD = new ParseField("actual_field"); + private static final ParseField PREDICTED_FIELD = new ParseField("predicted_field"); + private static final ParseField METRICS = new ParseField("metrics"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + NAME.getPreferredName(), a -> new Regression((String) a[0], (String) a[1], (List) a[2])); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), ACTUAL_FIELD); + PARSER.declareString(ConstructingObjectParser.constructorArg(), PREDICTED_FIELD); + PARSER.declareNamedObjects(ConstructingObjectParser.optionalConstructorArg(), + (p, c, n) -> p.namedObject(RegressionMetric.class, n, c), METRICS); + } + + public static Regression fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + /** + * The field containing the actual value + * The value of this field is assumed to be numeric + */ + private final String actualField; + + /** + * The field containing the predicted value + * The value of this field is assumed to be numeric + */ + private final String predictedField; + + /** + * The list of metrics to calculate + */ + private final List metrics; + + public Regression(String actualField, String predictedField, @Nullable List metrics) { + this.actualField = ExceptionsHelper.requireNonNull(actualField, ACTUAL_FIELD); + this.predictedField = ExceptionsHelper.requireNonNull(predictedField, PREDICTED_FIELD); + this.metrics = initMetrics(metrics); + } + + public Regression(StreamInput in) throws IOException { + this.actualField = in.readString(); + this.predictedField = in.readString(); + this.metrics = in.readNamedWriteableList(RegressionMetric.class); + } + + private static List initMetrics(@Nullable List parsedMetrics) { + List metrics = parsedMetrics == null ? 
defaultMetrics() : parsedMetrics; + if (metrics.isEmpty()) { + throw ExceptionsHelper.badRequestException("[{}] must have one or more metrics", NAME.getPreferredName()); + } + Collections.sort(metrics, Comparator.comparing(RegressionMetric::getMetricName)); + return metrics; + } + + private static List defaultMetrics() { + List defaultMetrics = new ArrayList<>(2); + defaultMetrics.add(new MeanSquaredError()); + defaultMetrics.add(new RSquared()); + return defaultMetrics; + } + + @Override + public String getName() { + return NAME.getPreferredName(); + } + + @Override + public SearchSourceBuilder buildSearch() { + BoolQueryBuilder boolQuery = QueryBuilders.boolQuery() + .filter(QueryBuilders.existsQuery(actualField)) + .filter(QueryBuilders.existsQuery(predictedField)); + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().size(0).query(boolQuery); + for (RegressionMetric metric : metrics) { + List aggs = metric.aggs(actualField, predictedField); + aggs.forEach(searchSourceBuilder::aggregation); + } + return searchSourceBuilder; + } + + @Override + public void evaluate(SearchResponse searchResponse, ActionListener> listener) { + List results = new ArrayList<>(metrics.size()); + for (RegressionMetric metric : metrics) { + results.add(metric.evaluate(searchResponse.getAggregations())); + } + listener.onResponse(results); + } + + @Override + public String getWriteableName() { + return NAME.getPreferredName(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(actualField); + out.writeString(predictedField); + out.writeNamedWriteableList(metrics); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(ACTUAL_FIELD.getPreferredName(), actualField); + builder.field(PREDICTED_FIELD.getPreferredName(), predictedField); + + builder.startObject(METRICS.getPreferredName()); + for (RegressionMetric metric : metrics) { + builder.field(metric.getWriteableName(), metric); + } + builder.endObject(); + + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Regression that = (Regression) o; + return Objects.equals(that.actualField, this.actualField) + && Objects.equals(that.predictedField, this.predictedField) + && Objects.equals(that.metrics, this.metrics); + } + + @Override + public int hashCode() { + return Objects.hash(actualField, predictedField, metrics); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/RegressionMetric.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/RegressionMetric.java new file mode 100644 index 0000000000000..1da48e2f305e6 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/RegressionMetric.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
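[Editor's note: taken together, the new Regression evaluation can be driven as below. A sketch only — "price" and "price_prediction" are hypothetical field names, and executing the search is elided:

// null metrics -> the defaults (mean_squared_error and r_squared), sorted by name.
Regression evaluation = new Regression("price", "price_prediction", null);

// Size-0 search, filtered to docs that contain both fields, carrying one agg per metric:
SearchSourceBuilder source = evaluation.buildSearch();

// After executing the search, each metric pulls its result out of the aggregations:
// evaluation.evaluate(searchResponse, ActionListener.wrap(
//         results -> results.forEach(r -> logger.info(r.getName())),
//         e -> logger.error("evaluation failed", e)));
]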
+ */ +package org.elasticsearch.xpack.core.ml.dataframe.evaluation.regression; + +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.xpack.core.ml.dataframe.evaluation.EvaluationMetricResult; + +import java.util.List; + +public interface RegressionMetric extends ToXContentObject, NamedWriteable { + + /** + * Returns the name of the metric (which may differ to the writeable name) + */ + String getMetricName(); + + /** + * Builds the aggregation that collect required data to compute the metric + * @param actualField the field that stores the actual value + * @param predictedField the field that stores the predicted value + * @return the aggregations required to compute the metric + */ + List aggs(String actualField, String predictedField); + + /** + * Calculates the metric result + * @param aggs the aggregations + * @return the metric result + */ + EvaluationMetricResult evaluate(Aggregations aggs); +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java index 07b823f27768c..c2d9c857e563b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java @@ -398,10 +398,10 @@ public Builder(AnalysisConfig analysisConfig) { this.multivariateByFields = analysisConfig.multivariateByFields; } - public void setDetectors(List detectors) { + public Builder setDetectors(List detectors) { if (detectors == null) { this.detectors = null; - return; + return this; } // We always assign sequential IDs to the detectors that are correct for this analysis config int detectorIndex = 0; @@ -412,42 +412,52 @@ public void setDetectors(List detectors) { sequentialIndexDetectors.add(builder.build()); } this.detectors = sequentialIndexDetectors; + return this; } - public void setDetector(int detectorIndex, Detector detector) { + public Builder setDetector(int detectorIndex, Detector detector) { detectors.set(detectorIndex, detector); + return this; } - public void setBucketSpan(TimeValue bucketSpan) { + public Builder setBucketSpan(TimeValue bucketSpan) { this.bucketSpan = bucketSpan; + return this; } - public void setLatency(TimeValue latency) { + public Builder setLatency(TimeValue latency) { this.latency = latency; + return this; } - public void setCategorizationFieldName(String categorizationFieldName) { + public Builder setCategorizationFieldName(String categorizationFieldName) { this.categorizationFieldName = categorizationFieldName; + return this; } - public void setCategorizationFilters(List categorizationFilters) { + public Builder setCategorizationFilters(List categorizationFilters) { this.categorizationFilters = categorizationFilters; + return this; } - public void setCategorizationAnalyzerConfig(CategorizationAnalyzerConfig categorizationAnalyzerConfig) { + public Builder setCategorizationAnalyzerConfig(CategorizationAnalyzerConfig categorizationAnalyzerConfig) { this.categorizationAnalyzerConfig = categorizationAnalyzerConfig; + return this; } - public void setSummaryCountFieldName(String summaryCountFieldName) { + public Builder setSummaryCountFieldName(String summaryCountFieldName) { 
this.summaryCountFieldName = summaryCountFieldName; + return this; } - public void setInfluencers(List influencers) { + public Builder setInfluencers(List influencers) { this.influencers = ExceptionsHelper.requireNonNull(influencers, INFLUENCERS.getPreferredName()); + return this; } - public void setMultivariateByFields(Boolean multivariateByFields) { + public Builder setMultivariateByFields(Boolean multivariateByFields) { this.multivariateByFields = multivariateByFields; + return this; } /** diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java index 77073a23491e6..6981772066b96 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java @@ -18,7 +18,7 @@ import org.elasticsearch.cluster.metadata.AliasOrIndex; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; -import org.elasticsearch.common.CheckedBiFunction; +import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.Index; @@ -140,9 +140,13 @@ private ElasticsearchMappings() { } public static XContentBuilder configMapping() throws IOException { + return configMapping(SINGLE_MAPPING_NAME); + } + + public static XContentBuilder configMapping(String mappingType) throws IOException { XContentBuilder builder = jsonBuilder(); builder.startObject(); - builder.startObject(SINGLE_MAPPING_NAME); + builder.startObject(mappingType); addMetaInformation(builder); addDefaultMapping(builder); builder.startObject(PROPERTIES); @@ -932,6 +936,7 @@ private static void addTimingStatsExceptBucketCountMapping(XContentBuilder build /** * {@link DatafeedTimingStats} mapping. + * Does not include mapping for BUCKET_COUNT as this mapping is added by {@link #addDataCountsMapping} method. 
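[Editor's note: returning the Builder from each setter lets call sites chain configuration. An illustrative fragment, assuming the existing detector-list constructor and build() method, which sit outside this hunk:

AnalysisConfig config = new AnalysisConfig.Builder(Collections.singletonList(detector))
        .setBucketSpan(TimeValue.timeValueMinutes(15))
        .setSummaryCountFieldName("doc_count")
        .setInfluencers(Collections.singletonList("client_ip"))
        .build();
]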
* * @throws IOException On builder write error */ @@ -940,6 +945,7 @@ private static void addDatafeedTimingStats(XContentBuilder builder) throws IOExc .startObject(DatafeedTimingStats.SEARCH_COUNT.getPreferredName()) .field(TYPE, LONG) .endObject() + // re-used: BUCKET_COUNT .startObject(DatafeedTimingStats.TOTAL_SEARCH_TIME_MS.getPreferredName()) .field(TYPE, DOUBLE) .endObject(); @@ -1146,7 +1152,7 @@ static String[] mappingRequiresUpdate(ClusterState state, String[] concreteIndic } public static void addDocMappingIfMissing(String alias, - CheckedBiFunction, XContentBuilder, IOException> mappingSupplier, + CheckedFunction mappingSupplier, Client client, ClusterState state, ActionListener listener) { AliasOrIndex aliasOrIndex = state.metaData().getAliasAndIndexLookup().get(alias); if (aliasOrIndex == null) { @@ -1170,7 +1176,7 @@ public static void addDocMappingIfMissing(String alias, IndexMetaData indexMetaData = state.metaData().index(indicesThatRequireAnUpdate[0]); String mappingType = indexMetaData.mapping().type(); - try (XContentBuilder mapping = mappingSupplier.apply(mappingType, Collections.emptyList())) { + try (XContentBuilder mapping = mappingSupplier.apply(mappingType)) { PutMappingRequest putMappingRequest = new PutMappingRequest(indicesThatRequireAnUpdate); putMappingRequest.type(mappingType); putMappingRequest.source(mapping); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/TimingStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/TimingStats.java index 0bcc4ea5f45d2..b526d614df3ab 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/TimingStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/TimingStats.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; import java.io.IOException; import java.util.Objects; @@ -28,6 +29,7 @@ public class TimingStats implements ToXContentObject, Writeable { public static final ParseField BUCKET_COUNT = new ParseField("bucket_count"); + public static final ParseField TOTAL_BUCKET_PROCESSING_TIME_MS = new ParseField("total_bucket_processing_time_ms"); public static final ParseField MIN_BUCKET_PROCESSING_TIME_MS = new ParseField("minimum_bucket_processing_time_ms"); public static final ParseField MAX_BUCKET_PROCESSING_TIME_MS = new ParseField("maximum_bucket_processing_time_ms"); public static final ParseField AVG_BUCKET_PROCESSING_TIME_MS = new ParseField("average_bucket_processing_time_ms"); @@ -40,8 +42,21 @@ public class TimingStats implements ToXContentObject, Writeable { new ConstructingObjectParser<>( TYPE.getPreferredName(), true, - args -> - new TimingStats((String) args[0], (long) args[1], (Double) args[2], (Double) args[3], (Double) args[4], (Double) args[5])); + args -> { + String jobId = (String) args[0]; + long bucketCount = (long) args[1]; + Double minBucketProcessingTimeMs = (Double) args[2]; + Double maxBucketProcessingTimeMs = (Double) args[3]; + Double avgBucketProcessingTimeMs = (Double) args[4]; + Double exponentialAvgBucketProcessingTimeMs = (Double) args[5]; + return new TimingStats( + jobId, + bucketCount, + minBucketProcessingTimeMs, + maxBucketProcessingTimeMs, + avgBucketProcessingTimeMs, + 
exponentialAvgBucketProcessingTimeMs); + }); static { PARSER.declareString(constructorArg(), Job.ID); @@ -109,6 +124,13 @@ public long getBucketCount() { return bucketCount; } + /** Calculates total bucket processing time as a product of the all-time average bucket processing time and the number of buckets. */ + public double getTotalBucketProcessingTimeMs() { + return avgBucketProcessingTimeMs != null + ? bucketCount * avgBucketProcessingTimeMs + : 0.0; + } + public Double getMinBucketProcessingTimeMs() { return minBucketProcessingTimeMs; } @@ -126,7 +148,7 @@ public Double getExponentialAvgBucketProcessingTimeMs() { } /** - * Updates the statistics (min, max, avg) for the given data point (bucket processing time). + * Updates the statistics (min, max, avg, exponential avg) for the given data point (bucket processing time). */ public void updateStats(double bucketProcessingTimeMs) { if (bucketProcessingTimeMs < 0.0) { @@ -175,6 +197,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); builder.field(Job.ID.getPreferredName(), jobId); builder.field(BUCKET_COUNT.getPreferredName(), bucketCount); + if (params.paramAsBoolean(ToXContentParams.INCLUDE_CALCULATED_FIELDS, false)) { + builder.field(TOTAL_BUCKET_PROCESSING_TIME_MS.getPreferredName(), getTotalBucketProcessingTimeMs()); + } if (minBucketProcessingTimeMs != null) { builder.field(MIN_BUCKET_PROCESSING_TIME_MS.getPreferredName(), minBucketProcessingTimeMs); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java index ddd13e7fc31ba..51717c6bad2d0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java @@ -187,6 +187,7 @@ public final class ReservedFieldNames { TimingStats.EXPONENTIAL_AVG_BUCKET_PROCESSING_TIME_MS.getPreferredName(), DatafeedTimingStats.SEARCH_COUNT.getPreferredName(), + DatafeedTimingStats.BUCKET_COUNT.getPreferredName(), DatafeedTimingStats.TOTAL_SEARCH_TIME_MS.getPreferredName(), GetResult._ID, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ToXContentParams.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ToXContentParams.java index f7fb9d46ec8a8..249a319823c75 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ToXContentParams.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ToXContentParams.java @@ -24,6 +24,12 @@ public final class ToXContentParams { */ public static final String INCLUDE_TYPE = "include_type"; + /** + * When serialising POJOs to X Content this indicates whether the calculated (i.e. 
not stored) fields + * should be included or not + */ + public static final String INCLUDE_CALCULATED_FIELDS = "include_calculated_fields"; + private ToXContentParams() { } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkAction.java index 6426c9fb2ce7b..fda62be575cae 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkAction.java @@ -5,20 +5,15 @@ */ package org.elasticsearch.xpack.core.monitoring.action; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class MonitoringBulkAction extends StreamableResponseActionType { +public class MonitoringBulkAction extends ActionType { public static final MonitoringBulkAction INSTANCE = new MonitoringBulkAction(); public static final String NAME = "cluster:admin/xpack/monitoring/bulk"; private MonitoringBulkAction() { - super(NAME); - } - - @Override - public MonitoringBulkResponse newResponse() { - return new MonitoringBulkResponse(); + super(NAME, MonitoringBulkResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkDoc.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkDoc.java index 0de18220c1010..c95dbb03a1d46 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkDoc.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkDoc.java @@ -46,20 +46,15 @@ public MonitoringBulkDoc(final MonitoredSystem system, this.xContentType = Objects.requireNonNull(xContentType); } - /** - * Read from a stream. - */ - public static MonitoringBulkDoc readFrom(StreamInput in) throws IOException { - final MonitoredSystem system = MonitoredSystem.fromSystem(in.readOptionalString()); - final long timestamp = in.readVLong(); - - final String type = in.readOptionalString(); - final String id = in.readOptionalString(); - final BytesReference source = in.readBytesReference(); - final XContentType xContentType = (source != BytesArray.EMPTY) ? in.readEnum(XContentType.class) : XContentType.JSON; - long interval = in.readVLong(); - - return new MonitoringBulkDoc(system, type, id, timestamp, interval, source, xContentType); + public MonitoringBulkDoc (StreamInput in) throws IOException { + this.system = MonitoredSystem.fromSystem(in.readOptionalString()); + this.timestamp = in.readVLong(); + + this.type = in.readOptionalString(); + this.id = in.readOptionalString(); + this.source = in.readBytesReference(); + this.xContentType = (source != BytesArray.EMPTY) ? 
in.readEnum(XContentType.class) : XContentType.JSON; + this.intervalMillis = in.readVLong(); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequest.java index 12c4e0b7ee3cc..5c1d700343fc8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequest.java @@ -32,6 +32,13 @@ public class MonitoringBulkRequest extends ActionRequest { private final List docs = new ArrayList<>(); + public MonitoringBulkRequest() {} + + public MonitoringBulkRequest(StreamInput in) throws IOException { + super(in); + docs.addAll(in.readList(MonitoringBulkDoc::new)); + } + /** * @return the list of {@link MonitoringBulkDoc} to be indexed */ @@ -95,12 +102,6 @@ public MonitoringBulkRequest add(final MonitoredSystem system, return this; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - docs.addAll(in.readList(MonitoringBulkDoc::readFrom)); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkResponse.java index 1a70cfac8620f..e880f08c9cd83 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkResponse.java @@ -25,9 +25,6 @@ public class MonitoringBulkResponse extends ActionResponse { private Error error; private boolean ignored; - public MonitoringBulkResponse() { - } - public MonitoringBulkResponse(final long tookInMillis, final boolean ignored) { this.tookInMillis = tookInMillis; this.ignored = ignored; @@ -38,6 +35,13 @@ public MonitoringBulkResponse(final long tookInMillis, final Error error) { this.error = error; } + public MonitoringBulkResponse(StreamInput in) throws IOException { + super(in); + tookInMillis = in.readVLong(); + error = in.readOptionalWriteable(Error::new); + ignored = in.readBoolean(); + } + public TimeValue getTook() { return new TimeValue(tookInMillis); } @@ -75,14 +79,6 @@ public Error getError() { return error; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - tookInMillis = in.readVLong(); - error = in.readOptionalWriteable(Error::new); - ignored = in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVLong(tookInMillis); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/DeleteRollupJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/DeleteRollupJobAction.java index 2a8b8eebb22e9..1701e2a3d372f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/DeleteRollupJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/DeleteRollupJobAction.java @@ -35,12 +35,7 @@ public class DeleteRollupJobAction extends ActionType getResponseReader() { - return Response::new; + super(NAME, DeleteRollupJobAction.Response::new); } public static class 
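[Editor's note: the constructor-based deserialization above is typically exercised by a Writeable round-trip, which under the new scheme looks like this sketch (population of the request elided):

MonitoringBulkRequest original = new MonitoringBulkRequest();
// ... populate via original.add(...) ...

BytesStreamOutput out = new BytesStreamOutput();
original.writeTo(out);

// Deserialization is a constructor call instead of readFrom() on a default instance:
MonitoringBulkRequest copy = new MonitoringBulkRequest(out.bytes().streamInput());
]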
Request extends BaseTasksRequest implements ToXContentFragment { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupCapsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupCapsAction.java index 81e97d4f280ad..c483940aab585 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupCapsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupCapsAction.java @@ -9,7 +9,7 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.ParseField; @@ -27,7 +27,7 @@ import java.util.Map; import java.util.Objects; -public class GetRollupCapsAction extends StreamableResponseActionType { +public class GetRollupCapsAction extends ActionType { public static final GetRollupCapsAction INSTANCE = new GetRollupCapsAction(); public static final String NAME = "cluster:monitor/xpack/rollup/get/caps"; @@ -35,12 +35,7 @@ public class GetRollupCapsAction extends StreamableResponseActionType { +public class GetRollupIndexCapsAction extends ActionType { public static final GetRollupIndexCapsAction INSTANCE = new GetRollupIndexCapsAction(); public static final String NAME = "indices:data/read/xpack/rollup/get/index/caps"; @@ -39,12 +39,7 @@ public class GetRollupIndexCapsAction extends StreamableResponseActionType getResponseReader() { - return Response::new; + super(NAME, GetRollupJobsAction.Response::new); } public static class Request extends BaseTasksRequest implements ToXContentObject { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/PutRollupJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/PutRollupJobAction.java index 4dcdc2c22f88a..bea98f1c4534b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/PutRollupJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/PutRollupJobAction.java @@ -5,8 +5,8 @@ */ package org.elasticsearch.xpack.core.rollup.action; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.fieldcaps.FieldCapabilities; import org.elasticsearch.action.support.IndicesOptions; @@ -16,7 +16,6 @@ import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -32,12 +31,7 @@ public class PutRollupJobAction extends ActionType { public static final String NAME = "cluster:admin/xpack/rollup/put"; private PutRollupJobAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return AcknowledgedResponse::new; + super(NAME, AcknowledgedResponse::new); } public static class Request 
extends AcknowledgedRequest implements IndicesRequest, ToXContentObject { @@ -49,6 +43,11 @@ public Request(RollupJobConfig config) { this.config = config; } + public Request(StreamInput in) throws IOException { + super(in); + this.config = new RollupJobConfig(in); + } + public Request() { } @@ -65,12 +64,6 @@ public void setConfig(RollupJobConfig config) { this.config = config; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - this.config = new RollupJobConfig(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupSearchAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupSearchAction.java index b64f2401718f4..30314dfde818f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupSearchAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupSearchAction.java @@ -5,12 +5,11 @@ */ package org.elasticsearch.xpack.core.rollup.action; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.ElasticsearchClient; -import org.elasticsearch.common.io.stream.Writeable; public class RollupSearchAction extends ActionType { @@ -18,12 +17,7 @@ public class RollupSearchAction extends ActionType { public static final String NAME = "indices:admin/xpack/rollup/search"; private RollupSearchAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return SearchResponse::new; + super(NAME, SearchResponse::new); } public static class RequestBuilder extends ActionRequestBuilder { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StartRollupJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StartRollupJobAction.java index c844acd63e198..5919ba525645f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StartRollupJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StartRollupJobAction.java @@ -30,12 +30,7 @@ public class StartRollupJobAction extends ActionType getResponseReader() { - return Response::new; + super(NAME, StartRollupJobAction.Response::new); } public static class Request extends BaseTasksRequest implements ToXContentObject { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StopRollupJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StopRollupJobAction.java index 42a968548043a..1ecc8b991f1a6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StopRollupJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StopRollupJobAction.java @@ -36,12 +36,7 @@ public class StopRollupJobAction extends ActionType getResponseReader() { - return Response::new; + super(NAME, StopRollupJobAction.Response::new); } public static class Request extends BaseTasksRequest implements ToXContentObject { diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/CronSchedule.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/CronSchedule.java similarity index 75% rename from x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/CronSchedule.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/CronSchedule.java index 0a093742cdc29..b2c763e47f39c 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/CronSchedule.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/CronSchedule.java @@ -3,15 +3,12 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.rollup.job; - -import org.elasticsearch.xpack.core.scheduler.Cron; -import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; +package org.elasticsearch.xpack.core.scheduler; public class CronSchedule implements SchedulerEngine.Schedule { private final Cron cron; - CronSchedule(String cronExpression) { + public CronSchedule(String cronExpression) { this.cron = new Cron(cronExpression); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngine.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngine.java index 95dca09661978..1c2a9538c25a9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngine.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngine.java @@ -17,9 +17,12 @@ import java.time.Clock; import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; @@ -136,6 +139,10 @@ public void stop() { } } + public Set scheduledJobIds() { + return Collections.unmodifiableSet(new HashSet<>(schedules.keySet())); + } + public void add(Job job) { ActiveSchedule schedule = new ActiveSchedule(job.getId(), job.getSchedule(), clock.millis()); schedules.compute(schedule.name, (name, previousSchedule) -> { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityContext.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityContext.java index 0da07a52996ad..e1922ca87eacf 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityContext.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityContext.java @@ -5,8 +5,8 @@ */ package org.elasticsearch.xpack.core.security; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -17,6 +17,7 @@ import org.elasticsearch.xpack.core.security.user.User; import java.io.IOException; +import java.io.UncheckedIOException; import java.util.Collections; import java.util.Objects; import java.util.function.Consumer; @@ -53,10 +54,8 @@ public Authentication getAuthentication() { try { return Authentication.readFromContext(threadContext); } catch (IOException e) { - // TODO: this seems bogus, the only way to get an ioexception here is from a corrupt or tampered 
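[Editor's note: with CronSchedule promoted to x-pack core and made public, and scheduledJobIds() added, callers outside rollup can both register jobs and inspect what is registered. An illustrative fragment — schedulerEngine is assumed to be an existing SchedulerEngine instance:

SchedulerEngine.Job job = new SchedulerEngine.Job("nightly-task", new CronSchedule("0 0 1 * * ?"));
schedulerEngine.add(job);

// scheduledJobIds() returns an unmodifiable snapshot of the registered job names:
Set<String> ids = schedulerEngine.scheduledJobIds();
assert ids.contains("nightly-task");
]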
- // auth header, which should be be audited? logger.error("failed to read authentication", e); - return null; + throw new UncheckedIOException(e); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyAction.java index b289819d49869..00f96cbf886a7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyAction.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.core.security.action; import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable; /** * ActionType for the creation of an API key @@ -18,11 +17,7 @@ public final class CreateApiKeyAction extends ActionType<CreateApiKeyResponse> { public static final CreateApiKeyAction INSTANCE = new CreateApiKeyAction(); private CreateApiKeyAction() { - super(NAME); + super(NAME, CreateApiKeyResponse::new); } - @Override - public Writeable.Reader<CreateApiKeyResponse> getResponseReader() { - return CreateApiKeyResponse::new; - } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyRequest.java index 78ba943f00442..13f480d4c09a4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyRequest.java @@ -124,9 +124,4 @@ public void writeTo(StreamOutput out) throws IOException { out.writeList(roleDescriptors); refreshPolicy.writeTo(out); } - - @Override - public void readFrom(StreamInput in) { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyResponse.java index 708afeb07e74f..56c855fe26b00 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyResponse.java @@ -133,11 +133,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalInstant(expiration); } - @Override - public void readFrom(StreamInput in) { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - public static CreateApiKeyResponse fromXContent(XContentParser parser) throws IOException { return PARSER.parse(parser, null); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GetApiKeyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GetApiKeyAction.java index b0ee860c71a64..2b2484ee5bfef 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GetApiKeyAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GetApiKeyAction.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.core.security.action; import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable; /** * ActionType for retrieving API key(s) @@ -18,11 +17,6 
@@ public final class GetApiKeyAction extends ActionType<GetApiKeyResponse> { public static final GetApiKeyAction INSTANCE = new GetApiKeyAction(); private GetApiKeyAction() { - super(NAME); + super(NAME, GetApiKeyResponse::new); } - - @Override - public Writeable.Reader<GetApiKeyResponse> getResponseReader() { - return GetApiKeyResponse::new; - } -} \ No newline at end of file +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GetApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GetApiKeyRequest.java index 287ebcee4b6f2..125602f68c5e2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GetApiKeyRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GetApiKeyRequest.java @@ -139,8 +139,4 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(apiKeyName); } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GetApiKeyResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GetApiKeyResponse.java index d33e2c3e635e6..f4765721819b5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GetApiKeyResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GetApiKeyResponse.java @@ -57,11 +57,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder.endObject(); } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeArray(foundApiKeysInfo); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyAction.java index 5b99a4c035015..965d2231c8457 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyAction.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.core.security.action; import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable; /** * ActionType for invalidating API key @@ -18,11 +17,6 @@ public final class InvalidateApiKeyAction extends ActionType<InvalidateApiKeyResponse> { public static final InvalidateApiKeyAction INSTANCE = new InvalidateApiKeyAction(); private InvalidateApiKeyAction() { - super(NAME); - } - - @Override - public Writeable.Reader<InvalidateApiKeyResponse> getResponseReader() { - return InvalidateApiKeyResponse::new; + super(NAME, InvalidateApiKeyResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyRequest.java index f8815785d53d8..15a2c87becd20 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyRequest.java @@ -138,9 +138,4 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(id); out.writeOptionalString(name); } - - @Override - public void 
readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyResponse.java index eeb9860d47686..39850c0fff88d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyResponse.java @@ -102,11 +102,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder.endObject(); } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeStringCollection(invalidatedApiKeys); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateAction.java index f65d82730ef3e..ea4deb9e3dae7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateAction.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.core.security.action.oidc; import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable; /** * ActionType for initiating an authentication process using OpenID Connect @@ -17,11 +16,7 @@ public final class OpenIdConnectAuthenticateAction extends ActionType<OpenIdConnectAuthenticateResponse> { public static final OpenIdConnectAuthenticateAction INSTANCE = new OpenIdConnectAuthenticateAction(); private OpenIdConnectAuthenticateAction() { - super(NAME); + super(NAME, OpenIdConnectAuthenticateResponse::new); } - @Override - public Writeable.Reader<OpenIdConnectAuthenticateResponse> getResponseReader() { - return OpenIdConnectAuthenticateResponse::new; - } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateRequest.java index 1e27e02e607fc..b90a3a69c840d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateRequest.java @@ -43,7 +43,7 @@ public OpenIdConnectAuthenticateRequest() { } public OpenIdConnectAuthenticateRequest(StreamInput in) throws IOException { - super.readFrom(in); + super(in); redirectUri = in.readString(); state = in.readString(); nonce = in.readString(); @@ -96,11 +96,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(nonce); } - @Override - public void readFrom(StreamInput in) { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - public String toString() { return "{redirectUri=" + redirectUri + ", state=" + state + ", nonce=" + nonce + "}"; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateResponse.java index 8ceafab05cc78..9598d619eed91 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateResponse.java @@ -26,7 +26,7 @@ public OpenIdConnectAuthenticateResponse(String principal, String accessTokenStr } public OpenIdConnectAuthenticateResponse(StreamInput in) throws IOException { - super.readFrom(in); + super(in); principal = in.readString(); accessTokenString = in.readString(); refreshTokenString = in.readString(); @@ -49,11 +49,6 @@ public TimeValue getExpiresIn() { return expiresIn; } - @Override - public void readFrom(StreamInput in) { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(principal); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectLogoutAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectLogoutAction.java index b1ab044dc0183..6e68e94dbecc9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectLogoutAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectLogoutAction.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.core.security.action.oidc; import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable; public class OpenIdConnectLogoutAction extends ActionType<OpenIdConnectLogoutResponse> { @@ -14,11 +13,6 @@ public class OpenIdConnectLogoutAction extends ActionType<OpenIdConnectLogoutResponse> { public static final OpenIdConnectLogoutAction INSTANCE = new OpenIdConnectLogoutAction(); private OpenIdConnectLogoutAction() { - super(NAME); - } - - @Override - public Writeable.Reader<OpenIdConnectLogoutResponse> getResponseReader() { - return OpenIdConnectLogoutResponse::new; + super(NAME, OpenIdConnectLogoutResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectLogoutRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectLogoutRequest.java index 777df403ecab3..c935d82ab97aa 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectLogoutRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectLogoutRequest.java @@ -27,7 +27,7 @@ public OpenIdConnectLogoutRequest() { } public OpenIdConnectLogoutRequest(StreamInput in) throws IOException { - super.readFrom(in); + super(in); token = in.readString(); refreshToken = in.readOptionalString(); } @@ -63,9 +63,4 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(token); out.writeOptionalString(refreshToken); } - - @Override - public void readFrom(StreamInput in) { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectLogoutResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectLogoutResponse.java index 86958b9f269b7..4e05a12cf63e0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectLogoutResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectLogoutResponse.java @@ -16,7 +16,7 @@ public final class OpenIdConnectLogoutResponse extends ActionResponse { private String 
endSessionUrl; public OpenIdConnectLogoutResponse(StreamInput in) throws IOException { - super.readFrom(in); + super(in); this.endSessionUrl = in.readString(); } @@ -24,11 +24,6 @@ public OpenIdConnectLogoutResponse(String endSessionUrl) { this.endSessionUrl = endSessionUrl; } - @Override - public void readFrom(StreamInput in) { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(endSessionUrl); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationAction.java index 44d6979500842..e4181d9ca74fa 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationAction.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.core.security.action.oidc; import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable; public class OpenIdConnectPrepareAuthenticationAction extends ActionType<OpenIdConnectPrepareAuthenticationResponse> { @@ -14,11 +13,7 @@ public class OpenIdConnectPrepareAuthenticationAction extends ActionType<OpenIdConnectPrepareAuthenticationResponse> { public static final OpenIdConnectPrepareAuthenticationAction INSTANCE = new OpenIdConnectPrepareAuthenticationAction(); private OpenIdConnectPrepareAuthenticationAction() { - super(NAME); + super(NAME, OpenIdConnectPrepareAuthenticationResponse::new); } - @Override - public Writeable.Reader<OpenIdConnectPrepareAuthenticationResponse> getResponseReader() { - return OpenIdConnectPrepareAuthenticationResponse::new; - } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationRequest.java index 8f6d616981b39..d4bb4778f878b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationRequest.java @@ -78,7 +78,7 @@ public OpenIdConnectPrepareAuthenticationRequest() { } public OpenIdConnectPrepareAuthenticationRequest(StreamInput in) throws IOException { - super.readFrom(in); + super(in); realmName = in.readOptionalString(); issuer = in.readOptionalString(); loginHint = in.readOptionalString(); @@ -108,11 +108,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(nonce); } - @Override - public void readFrom(StreamInput in) { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - public String toString() { return "{realmName=" + realmName + ", issuer=" + issuer + ", login_hint=" + loginHint + ", state=" + state + ", nonce=" + nonce + "}"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationResponse.java index 48978a50f1e08..34ccabaf0c297 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationResponse.java @@ -36,7 +36,7 @@ public OpenIdConnectPrepareAuthenticationResponse(String 
authorizationEndpointUr } public OpenIdConnectPrepareAuthenticationResponse(StreamInput in) throws IOException { - super.readFrom(in); + super(in); authenticationRequestUrl = in.readString(); state = in.readString(); nonce = in.readString(); @@ -54,11 +54,6 @@ public String getNonce() { return nonce; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(authenticationRequestUrl); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesAction.java index edf62cf307161..6d038d1ea1b8e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesAction.java @@ -5,22 +5,17 @@ */ package org.elasticsearch.xpack.core.security.action.privilege; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * ActionType for deleting application privileges. */ -public final class DeletePrivilegesAction extends StreamableResponseActionType { +public final class DeletePrivilegesAction extends ActionType { public static final DeletePrivilegesAction INSTANCE = new DeletePrivilegesAction(); public static final String NAME = "cluster:admin/xpack/security/privilege/delete"; private DeletePrivilegesAction() { - super(NAME); - } - - @Override - public DeletePrivilegesResponse newResponse() { - return new DeletePrivilegesResponse(); + super(NAME, DeletePrivilegesResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesRequest.java index d5ed78a482315..5f2eed1c66fb9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesRequest.java @@ -29,6 +29,13 @@ public final class DeletePrivilegesRequest extends ActionRequest private String[] privileges; private RefreshPolicy refreshPolicy = RefreshPolicy.IMMEDIATE; + public DeletePrivilegesRequest(StreamInput in) throws IOException { + super(in); + application = in.readString(); + privileges = in.readStringArray(); + refreshPolicy = RefreshPolicy.readFrom(in); + } + public DeletePrivilegesRequest() { this(null, Strings.EMPTY_ARRAY); } @@ -82,14 +89,6 @@ public void privileges(String[] privileges) { this.privileges = privileges; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - application = in.readString(); - privileges = in.readStringArray(); - refreshPolicy = RefreshPolicy.readFrom(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesResponse.java index 
23bc771ae369a..c0e369ac16b91 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesResponse.java @@ -24,7 +24,9 @@ public final class DeletePrivilegesResponse extends ActionResponse implements To private Set found; - public DeletePrivilegesResponse() { + public DeletePrivilegesResponse(StreamInput in) throws IOException { + super(in); + this.found = Collections.unmodifiableSet(in.readSet(StreamInput::readString)); } public DeletePrivilegesResponse(Collection found) { @@ -41,12 +43,6 @@ public Set found() { return this.found; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - this.found = Collections.unmodifiableSet(in.readSet(StreamInput::readString)); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeCollection(found, StreamOutput::writeString); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetBuiltinPrivilegesAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetBuiltinPrivilegesAction.java index 862fc449f6eba..436ccd0c5aa81 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetBuiltinPrivilegesAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetBuiltinPrivilegesAction.java @@ -5,22 +5,17 @@ */ package org.elasticsearch.xpack.core.security.action.privilege; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * ActionType for retrieving builtin privileges from security */ -public final class GetBuiltinPrivilegesAction extends StreamableResponseActionType { +public final class GetBuiltinPrivilegesAction extends ActionType { public static final GetBuiltinPrivilegesAction INSTANCE = new GetBuiltinPrivilegesAction(); public static final String NAME = "cluster:admin/xpack/security/privilege/builtin/get"; private GetBuiltinPrivilegesAction() { - super(NAME); - } - - @Override - public GetBuiltinPrivilegesResponse newResponse() { - return new GetBuiltinPrivilegesResponse(); + super(NAME, GetBuiltinPrivilegesResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetBuiltinPrivilegesRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetBuiltinPrivilegesRequest.java index a4f4eb9c8dd72..88970ab6e3ddc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetBuiltinPrivilegesRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetBuiltinPrivilegesRequest.java @@ -7,12 +7,19 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; /** * Request to retrieve built-in (cluster/index) privileges. 
*/ public final class GetBuiltinPrivilegesRequest extends ActionRequest { + public GetBuiltinPrivilegesRequest(StreamInput in) throws IOException { + super(in); + } + public GetBuiltinPrivilegesRequest() { } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetBuiltinPrivilegesResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetBuiltinPrivilegesResponse.java index 5cc2536c8398a..b2e306f37f216 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetBuiltinPrivilegesResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetBuiltinPrivilegesResponse.java @@ -37,6 +37,12 @@ public GetBuiltinPrivilegesResponse() { this(Collections.emptySet(), Collections.emptySet()); } + public GetBuiltinPrivilegesResponse(StreamInput in) throws IOException { + super(in); + this.clusterPrivileges = in.readStringArray(); + this.indexPrivileges = in.readStringArray(); + } + public String[] getClusterPrivileges() { return clusterPrivileges; } @@ -45,13 +51,6 @@ public String[] getIndexPrivileges() { return indexPrivileges; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - this.clusterPrivileges = in.readStringArray(); - this.indexPrivileges = in.readStringArray(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeStringArray(clusterPrivileges); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetPrivilegesAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetPrivilegesAction.java index b48bc474b4ebe..c697c3662f4a1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetPrivilegesAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetPrivilegesAction.java @@ -5,22 +5,17 @@ */ package org.elasticsearch.xpack.core.security.action.privilege; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * ActionType for retrieving one or more application privileges from the security index */ -public final class GetPrivilegesAction extends StreamableResponseActionType { +public final class GetPrivilegesAction extends ActionType { public static final GetPrivilegesAction INSTANCE = new GetPrivilegesAction(); public static final String NAME = "cluster:admin/xpack/security/privilege/get"; private GetPrivilegesAction() { - super(NAME); - } - - @Override - public GetPrivilegesResponse newResponse() { - return new GetPrivilegesResponse(); + super(NAME, GetPrivilegesResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetPrivilegesRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetPrivilegesRequest.java index 9285dae76696b..47a7d744182db 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetPrivilegesRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetPrivilegesRequest.java @@ -27,6 +27,12 @@ public final class GetPrivilegesRequest extends ActionRequest implements Applica private String application; private String[] privileges; + public 
GetPrivilegesRequest(StreamInput in) throws IOException { + super(in); + application = in.readOptionalString(); + privileges = in.readStringArray(); + } + public GetPrivilegesRequest() { privileges = Strings.EMPTY_ARRAY; } @@ -61,13 +67,6 @@ public String[] privileges() { return this.privileges; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - application = in.readOptionalString(); - privileges = in.readStringArray(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetPrivilegesResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetPrivilegesResponse.java index 5f9ae94ffd183..7feed67d15f13 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetPrivilegesResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetPrivilegesResponse.java @@ -29,20 +29,15 @@ public GetPrivilegesResponse(Collection privileg this(privileges.toArray(new ApplicationPrivilegeDescriptor[0])); } - public GetPrivilegesResponse() { - this(new ApplicationPrivilegeDescriptor[0]); + public GetPrivilegesResponse(StreamInput in) throws IOException { + super(in); + this.privileges = in.readArray(ApplicationPrivilegeDescriptor::new, ApplicationPrivilegeDescriptor[]::new); } public ApplicationPrivilegeDescriptor[] privileges() { return privileges; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - this.privileges = in.readArray(ApplicationPrivilegeDescriptor::new, ApplicationPrivilegeDescriptor[]::new); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeArray(privileges); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesAction.java index 56d9b285cceec..ed7eb0f43e660 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesAction.java @@ -5,22 +5,17 @@ */ package org.elasticsearch.xpack.core.security.action.privilege; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * ActionType for putting (adding/updating) one or more application privileges. 
*/ -public final class PutPrivilegesAction extends StreamableResponseActionType { +public final class PutPrivilegesAction extends ActionType { public static final PutPrivilegesAction INSTANCE = new PutPrivilegesAction(); public static final String NAME = "cluster:admin/xpack/security/privilege/put"; private PutPrivilegesAction() { - super(NAME); - } - - @Override - public PutPrivilegesResponse newResponse() { - return new PutPrivilegesResponse(); + super(NAME, PutPrivilegesResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequest.java index 62027045b116c..03e21358be5fc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequest.java @@ -31,6 +31,12 @@ public final class PutPrivilegesRequest extends ActionRequest implements Applica private List privileges; private RefreshPolicy refreshPolicy = RefreshPolicy.IMMEDIATE; + public PutPrivilegesRequest(StreamInput in) throws IOException { + super(in); + privileges = Collections.unmodifiableList(in.readList(ApplicationPrivilegeDescriptor::new)); + refreshPolicy = RefreshPolicy.readFrom(in); + } + public PutPrivilegesRequest() { privileges = Collections.emptyList(); } @@ -111,13 +117,6 @@ public String toString() { + "];" + refreshPolicy + "}"; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - privileges = Collections.unmodifiableList(in.readList(ApplicationPrivilegeDescriptor::new)); - refreshPolicy = RefreshPolicy.readFrom(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesResponse.java index 541fa3d943ea0..4efb4f483050f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesResponse.java @@ -24,8 +24,9 @@ public final class PutPrivilegesResponse extends ActionResponse implements ToXCo private Map> created; - PutPrivilegesResponse() { - this(Collections.emptyMap()); + public PutPrivilegesResponse(StreamInput in) throws IOException { + super(in); + this.created = Collections.unmodifiableMap(in.readMap(StreamInput::readString, StreamInput::readStringList)); } public PutPrivilegesResponse(Map> created) { @@ -51,9 +52,4 @@ public void writeTo(StreamOutput out) throws IOException { out.writeMap(created, StreamOutput::writeString, StreamOutput::writeStringCollection); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - this.created = Collections.unmodifiableMap(in.readMap(StreamInput::readString, StreamInput::readStringList)); } -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/realm/ClearRealmCacheAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/realm/ClearRealmCacheAction.java index 902c9263beb40..73ce736b96833 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/realm/ClearRealmCacheAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/realm/ClearRealmCacheAction.java @@ -5,19 +5,14 @@ */ package org.elasticsearch.xpack.core.security.action.realm; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class ClearRealmCacheAction extends StreamableResponseActionType { +public class ClearRealmCacheAction extends ActionType { public static final ClearRealmCacheAction INSTANCE = new ClearRealmCacheAction(); public static final String NAME = "cluster:admin/xpack/security/realm/cache/clear"; protected ClearRealmCacheAction() { - super(NAME); - } - - @Override - public ClearRealmCacheResponse newResponse() { - return new ClearRealmCacheResponse(); + super(NAME, ClearRealmCacheResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/realm/ClearRealmCacheRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/realm/ClearRealmCacheRequest.java index db84a3026471d..3aed8fc9c55b9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/realm/ClearRealmCacheRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/realm/ClearRealmCacheRequest.java @@ -17,6 +17,17 @@ public class ClearRealmCacheRequest extends BaseNodesRequest implements ToXContentFragment { - public ClearRealmCacheResponse() { + public ClearRealmCacheResponse(StreamInput in) throws IOException { + super(in); } public ClearRealmCacheResponse(ClusterName clusterName, List nodes, List failures) { @@ -31,12 +32,12 @@ public ClearRealmCacheResponse(ClusterName clusterName, List nodes, List readNodesFrom(StreamInput in) throws IOException { - return in.readList(Node::readNodeResponse); + return in.readList(Node::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeStreamableList(nodes); + out.writeList(nodes); } @Override @@ -67,18 +68,13 @@ public String toString() { public static class Node extends BaseNodeResponse { - public Node() { + public Node(StreamInput in) throws IOException { + super(in); } public Node(DiscoveryNode node) { super(node); } - - public static Node readNodeResponse(StreamInput in) throws IOException { - Node node = new Node(); - node.readFrom(in); - return node; - } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/ClearRolesCacheAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/ClearRolesCacheAction.java index 10c9497bbbfa7..ad504f5c43463 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/ClearRolesCacheAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/ClearRolesCacheAction.java @@ -5,22 +5,17 @@ */ package org.elasticsearch.xpack.core.security.action.role; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * The action for clearing the cache used by native roles that are stored in an index. 
*/ -public class ClearRolesCacheAction extends StreamableResponseActionType { +public class ClearRolesCacheAction extends ActionType { public static final ClearRolesCacheAction INSTANCE = new ClearRolesCacheAction(); public static final String NAME = "cluster:admin/xpack/security/roles/cache/clear"; protected ClearRolesCacheAction() { - super(NAME); - } - - @Override - public ClearRolesCacheResponse newResponse() { - return new ClearRolesCacheResponse(); + super(NAME, ClearRolesCacheResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/ClearRolesCacheRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/ClearRolesCacheRequest.java index 515038ae73039..582e3b32a2dfe 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/ClearRolesCacheRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/ClearRolesCacheRequest.java @@ -19,6 +19,14 @@ public class ClearRolesCacheRequest extends BaseNodesRequest implements ToXContentFragment { - public ClearRolesCacheResponse() { + public ClearRolesCacheResponse(StreamInput in) throws IOException { + super(in); } public ClearRolesCacheResponse(ClusterName clusterName, List nodes, List failures) { @@ -34,12 +35,12 @@ public ClearRolesCacheResponse(ClusterName clusterName, List nodes, List readNodesFrom(StreamInput in) throws IOException { - return in.readList(Node::readNodeResponse); + return in.readList(Node::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeStreamableList(nodes); + out.writeList(nodes); } @Override @@ -70,17 +71,12 @@ public String toString() { public static class Node extends BaseNodeResponse { - public Node() { + public Node(StreamInput in) throws IOException { + super(in); } public Node(DiscoveryNode node) { super(node); } - - public static Node readNodeResponse(StreamInput in) throws IOException { - Node node = new Node(); - node.readFrom(in); - return node; - } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/DeleteRoleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/DeleteRoleAction.java index 7024ed1eb42c8..f8be215c8a8ea 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/DeleteRoleAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/DeleteRoleAction.java @@ -5,23 +5,18 @@ */ package org.elasticsearch.xpack.core.security.action.role; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * ActionType for deleting a role from the security index */ -public class DeleteRoleAction extends StreamableResponseActionType { +public class DeleteRoleAction extends ActionType { public static final DeleteRoleAction INSTANCE = new DeleteRoleAction(); public static final String NAME = "cluster:admin/xpack/security/role/delete"; protected DeleteRoleAction() { - super(NAME); - } - - @Override - public DeleteRoleResponse newResponse() { - return new DeleteRoleResponse(); + super(NAME, DeleteRoleResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/DeleteRoleRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/DeleteRoleRequest.java index 
ff4d416e2002c..c5e0ce6782106 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/DeleteRoleRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/DeleteRoleRequest.java @@ -23,6 +23,12 @@ public class DeleteRoleRequest extends ActionRequest implements WriteRequest { +public class GetRolesAction extends ActionType { public static final GetRolesAction INSTANCE = new GetRolesAction(); public static final String NAME = "cluster:admin/xpack/security/role/get"; protected GetRolesAction() { - super(NAME); - } - - @Override - public GetRolesResponse newResponse() { - return new GetRolesResponse(); + super(NAME, GetRolesResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesRequest.java index 25851be2f015d..9e3838c76359c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesRequest.java @@ -22,6 +22,11 @@ public class GetRolesRequest extends ActionRequest { private String[] names = Strings.EMPTY_ARRAY; + public GetRolesRequest(StreamInput in) throws IOException { + super(in); + names = in.readStringArray(); + } + public GetRolesRequest() { } @@ -42,12 +47,6 @@ public String[] names() { return names; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - names = in.readStringArray(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesResponse.java index 5e2f89a88bbdc..5fed87d12e72e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesResponse.java @@ -19,6 +19,15 @@ public class GetRolesResponse extends ActionResponse { private RoleDescriptor[] roles; + public GetRolesResponse(StreamInput in) throws IOException { + super(in); + int size = in.readVInt(); + roles = new RoleDescriptor[size]; + for (int i = 0; i < size; i++) { + roles[i] = new RoleDescriptor(in); + } + } + public GetRolesResponse(RoleDescriptor... 
roles) { this.roles = roles; } @@ -31,16 +40,6 @@ public boolean hasRoles() { return roles.length > 0; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - int size = in.readVInt(); - roles = new RoleDescriptor[size]; - for (int i = 0; i < size; i++) { - roles[i] = new RoleDescriptor(in); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(roles.length); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleAction.java index 029adb6643000..8917aad827ed7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleAction.java @@ -5,23 +5,18 @@ */ package org.elasticsearch.xpack.core.security.action.role; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * ActionType for adding a role to the security index */ -public class PutRoleAction extends StreamableResponseActionType { +public class PutRoleAction extends ActionType { public static final PutRoleAction INSTANCE = new PutRoleAction(); public static final String NAME = "cluster:admin/xpack/security/role/put"; protected PutRoleAction() { - super(NAME); - } - - @Override - public PutRoleResponse newResponse() { - return new PutRoleResponse(); + super(NAME, PutRoleResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java index e19d9cebb64c1..1f69c441cc394 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java @@ -42,6 +42,22 @@ public class PutRoleRequest extends ActionRequest implements WriteRequest metadata; + public PutRoleRequest(StreamInput in) throws IOException { + super(in); + name = in.readString(); + clusterPrivileges = in.readStringArray(); + int indicesSize = in.readVInt(); + indicesPrivileges = new ArrayList<>(indicesSize); + for (int i = 0; i < indicesSize; i++) { + indicesPrivileges.add(new RoleDescriptor.IndicesPrivileges(in)); + } + applicationPrivileges = in.readList(RoleDescriptor.ApplicationResourcePrivileges::new); + conditionalClusterPrivileges = ConditionalClusterPrivileges.readArray(in); + runAs = in.readStringArray(); + refreshPolicy = RefreshPolicy.readFrom(in); + metadata = in.readMap(); + } + public PutRoleRequest() { } @@ -157,23 +173,6 @@ public Map metadata() { return metadata; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - name = in.readString(); - clusterPrivileges = in.readStringArray(); - int indicesSize = in.readVInt(); - indicesPrivileges = new ArrayList<>(indicesSize); - for (int i = 0; i < indicesSize; i++) { - indicesPrivileges.add(new RoleDescriptor.IndicesPrivileges(in)); - } - applicationPrivileges = in.readList(RoleDescriptor.ApplicationResourcePrivileges::new); - conditionalClusterPrivileges = ConditionalClusterPrivileges.readArray(in); - runAs = in.readStringArray(); - refreshPolicy = RefreshPolicy.readFrom(in); - metadata = in.readMap(); - } - @Override 
public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleResponse.java index 76f83faf59d86..3e00aa0ac8e85 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleResponse.java @@ -21,7 +21,9 @@ public class PutRoleResponse extends ActionResponse implements ToXContentObject private boolean created; - public PutRoleResponse() { + public PutRoleResponse(StreamInput in) throws IOException { + super(in); + this.created = in.readBoolean(); } public PutRoleResponse(boolean created) { @@ -43,9 +45,4 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(created); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - this.created = in.readBoolean(); } -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/DeleteRoleMappingAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/DeleteRoleMappingAction.java index 7dc2680fe21be..84cd0de96a91c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/DeleteRoleMappingAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/DeleteRoleMappingAction.java @@ -5,23 +5,18 @@ */ package org.elasticsearch.xpack.core.security.action.rolemapping; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * ActionType for deleting a role-mapping from the * org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore */ -public class DeleteRoleMappingAction extends StreamableResponseActionType { +public class DeleteRoleMappingAction extends ActionType { public static final DeleteRoleMappingAction INSTANCE = new DeleteRoleMappingAction(); public static final String NAME = "cluster:admin/xpack/security/role_mapping/delete"; private DeleteRoleMappingAction() { - super(NAME); - } - - @Override - public DeleteRoleMappingResponse newResponse() { - return new DeleteRoleMappingResponse(); + super(NAME, DeleteRoleMappingResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/DeleteRoleMappingRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/DeleteRoleMappingRequest.java index 9d3e1758026ea..4bb67327d7b4e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/DeleteRoleMappingRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/DeleteRoleMappingRequest.java @@ -5,14 +5,14 @@ */ package org.elasticsearch.xpack.core.security.action.rolemapping; -import java.io.IOException; - import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import java.io.IOException; + import static 
org.elasticsearch.action.ValidateActions.addValidationError; /** @@ -23,6 +23,12 @@ public class DeleteRoleMappingRequest extends ActionRequest implements WriteRequ private String name; private RefreshPolicy refreshPolicy = RefreshPolicy.IMMEDIATE; + public DeleteRoleMappingRequest(StreamInput in) throws IOException { + super(in); + name = in.readString(); + refreshPolicy = RefreshPolicy.readFrom(in); + } + public DeleteRoleMappingRequest() { } @@ -54,13 +60,6 @@ public String getName() { return name; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - name = in.readString(); - refreshPolicy = RefreshPolicy.readFrom(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/DeleteRoleMappingResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/DeleteRoleMappingResponse.java index 2870ae7c24ca3..6d763b0a91aa2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/DeleteRoleMappingResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/DeleteRoleMappingResponse.java @@ -21,10 +21,10 @@ public class DeleteRoleMappingResponse extends ActionResponse implements ToXCont private boolean found = false; - /** - * Package private for {@link DeleteRoleMappingAction#newResponse()} - */ - public DeleteRoleMappingResponse() {} + public DeleteRoleMappingResponse(StreamInput in) throws IOException { + super(in); + found = in.readBoolean(); + } public DeleteRoleMappingResponse(boolean found) { this.found = found; @@ -44,12 +44,6 @@ public boolean isFound() { return this.found; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - found = in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(found); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/GetRoleMappingsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/GetRoleMappingsAction.java index 2792348111946..3479c0fb37acb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/GetRoleMappingsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/GetRoleMappingsAction.java @@ -5,24 +5,19 @@ */ package org.elasticsearch.xpack.core.security.action.rolemapping; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * ActionType to retrieve one or more role-mappings from X-Pack security * * see org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore */ -public class GetRoleMappingsAction extends StreamableResponseActionType { +public class GetRoleMappingsAction extends ActionType { public static final GetRoleMappingsAction INSTANCE = new GetRoleMappingsAction(); public static final String NAME = "cluster:admin/xpack/security/role_mapping/get"; private GetRoleMappingsAction() { - super(NAME); - } - - @Override - public GetRoleMappingsResponse newResponse() { - return new GetRoleMappingsResponse(); + super(NAME, GetRoleMappingsResponse::new); } } diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/GetRoleMappingsRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/GetRoleMappingsRequest.java index ff59aa8482d61..f26c62f49da95 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/GetRoleMappingsRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/GetRoleMappingsRequest.java @@ -5,8 +5,6 @@ */ package org.elasticsearch.xpack.core.security.action.rolemapping; -import java.io.IOException; - import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.Strings; @@ -14,6 +12,8 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import java.io.IOException; + import static org.elasticsearch.action.ValidateActions.addValidationError; /** @@ -25,6 +25,11 @@ public class GetRoleMappingsRequest extends ActionRequest { private String[] names = Strings.EMPTY_ARRAY; + public GetRoleMappingsRequest(StreamInput in) throws IOException { + super(in); + names = in.readStringArray(); + } + public GetRoleMappingsRequest() { } @@ -53,12 +58,6 @@ public String[] getNames() { return names; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - names = in.readStringArray(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/GetRoleMappingsResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/GetRoleMappingsResponse.java index 7d43ff4fbad87..a3eebe37c216e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/GetRoleMappingsResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/GetRoleMappingsResponse.java @@ -21,6 +21,15 @@ public class GetRoleMappingsResponse extends ActionResponse { private ExpressionRoleMapping[] mappings; + public GetRoleMappingsResponse(StreamInput in) throws IOException { + super(in); + int size = in.readVInt(); + mappings = new ExpressionRoleMapping[size]; + for (int i = 0; i < size; i++) { + mappings[i] = new ExpressionRoleMapping(in); + } + } + public GetRoleMappingsResponse(ExpressionRoleMapping... 
mappings) { this.mappings = mappings; } @@ -33,16 +42,6 @@ public boolean hasMappings() { return mappings.length > 0; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - int size = in.readVInt(); - mappings = new ExpressionRoleMapping[size]; - for (int i = 0; i < size; i++) { - mappings[i] = new ExpressionRoleMapping(in); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(mappings.length); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingAction.java index 3bb3e2f51496d..c09ffd694d152 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingAction.java @@ -5,22 +5,17 @@ */ package org.elasticsearch.xpack.core.security.action.rolemapping; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * ActionType for adding a role to the security index */ -public class PutRoleMappingAction extends StreamableResponseActionType { +public class PutRoleMappingAction extends ActionType { public static final PutRoleMappingAction INSTANCE = new PutRoleMappingAction(); public static final String NAME = "cluster:admin/xpack/security/role_mapping/put"; private PutRoleMappingAction() { - super(NAME); - } - - @Override - public PutRoleMappingResponse newResponse() { - return new PutRoleMappingResponse(); + super(NAME, PutRoleMappingResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java index 43d005c5227a1..b25e309c1af89 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java @@ -42,6 +42,19 @@ public class PutRoleMappingRequest extends ActionRequest private Map metadata = Collections.emptyMap(); private RefreshPolicy refreshPolicy = RefreshPolicy.IMMEDIATE; + public PutRoleMappingRequest(StreamInput in) throws IOException { + super(in); + this.name = in.readString(); + this.enabled = in.readBoolean(); + this.roles = in.readStringList(); + if (in.getVersion().onOrAfter(Version.V_7_2_0)) { + this.roleTemplates = in.readList(TemplateRoleName::new); + } + this.rules = ExpressionParser.readExpression(in); + this.metadata = in.readMap(); + this.refreshPolicy = RefreshPolicy.readFrom(in); + } + public PutRoleMappingRequest() { } @@ -131,20 +144,6 @@ public Map getMetadata() { return metadata; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - this.name = in.readString(); - this.enabled = in.readBoolean(); - this.roles = in.readStringList(); - if (in.getVersion().onOrAfter(Version.V_7_2_0)) { - this.roleTemplates = in.readList(TemplateRoleName::new); - } - this.rules = ExpressionParser.readExpression(in); - this.metadata = in.readMap(); - this.refreshPolicy = RefreshPolicy.readFrom(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { 
super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingResponse.java index c3afc0a0d53c9..e7f5ea9ead319 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingResponse.java @@ -22,7 +22,9 @@ public class PutRoleMappingResponse extends ActionResponse implements ToXContent private boolean created; - public PutRoleMappingResponse() { + public PutRoleMappingResponse(StreamInput in) throws IOException { + super(in); + this.created = in.readBoolean(); } public PutRoleMappingResponse(boolean created) { @@ -44,9 +46,4 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(created); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - this.created = in.readBoolean(); } -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlAuthenticateAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlAuthenticateAction.java index fe40b521a1fff..090c8e8a35370 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlAuthenticateAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlAuthenticateAction.java @@ -5,22 +5,17 @@ */ package org.elasticsearch.xpack.core.security.action.saml; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * ActionType for authenticating using SAML assertions */ -public final class SamlAuthenticateAction extends StreamableResponseActionType { +public final class SamlAuthenticateAction extends ActionType { public static final String NAME = "cluster:admin/xpack/security/saml/authenticate"; public static final SamlAuthenticateAction INSTANCE = new SamlAuthenticateAction(); private SamlAuthenticateAction() { - super(NAME); - } - - @Override - public SamlAuthenticateResponse newResponse() { - return new SamlAuthenticateResponse(); + super(NAME, SamlAuthenticateResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlAuthenticateRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlAuthenticateRequest.java index 0d6d0f44c7110..40fce11edbc08 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlAuthenticateRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlAuthenticateRequest.java @@ -5,10 +5,12 @@ */ package org.elasticsearch.xpack.core.security.action.saml; -import java.util.List; - import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; +import java.util.List; /** * Represents a request to authenticate using SAML assertions. 
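Every hunk above applies the same mechanical migration: the Streamable-style readFrom(StreamInput) override is removed and its body moves into a constructor that accepts the StreamInput, so a message is fully initialized the moment it is deserialized and its fields can eventually be made final. A minimal stand-alone sketch of the before/after shape, with plain java.io types standing in for StreamInput/StreamOutput (the class and field names here are illustrative, not the Elasticsearch ones):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

final class NamesRequest {
    // Could not be final under the old pattern, because readFrom() mutated a
    // half-constructed object; the deserializing constructor fixes that.
    private final String[] names;

    NamesRequest(String... names) {
        this.names = names;
    }

    // Analogue of the new GetRoleMappingsRequest(StreamInput in) constructor.
    NamesRequest(DataInputStream in) throws IOException {
        int count = in.readInt();
        names = new String[count];
        for (int i = 0; i < count; i++) {
            names[i] = in.readUTF();
        }
    }

    // writeTo is untouched by the migration; it must stay symmetric with the
    // reading constructor.
    void writeTo(DataOutputStream out) throws IOException {
        out.writeInt(names.length);
        for (String name : names) {
            out.writeUTF(name);
        }
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        new NamesRequest("mapping1", "mapping2").writeTo(new DataOutputStream(bytes));
        NamesRequest copy = new NamesRequest(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(copy.names.length); // prints 2
    }
}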
@@ -18,6 +20,10 @@ public final class SamlAuthenticateRequest extends ActionRequest { private byte[] saml; private List validRequestIds; + public SamlAuthenticateRequest(StreamInput in) throws IOException { + super(in); + } + public SamlAuthenticateRequest() { } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlAuthenticateResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlAuthenticateResponse.java index 52619c7a79400..aa11093236a0b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlAuthenticateResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlAuthenticateResponse.java @@ -23,7 +23,12 @@ public final class SamlAuthenticateResponse extends ActionResponse { private String refreshToken; private TimeValue expiresIn; - public SamlAuthenticateResponse() { + public SamlAuthenticateResponse(StreamInput in) throws IOException { + super(in); + principal = in.readString(); + tokenString = in.readString(); + refreshToken = in.readString(); + expiresIn = in.readTimeValue(); } public SamlAuthenticateResponse(String principal, String tokenString, String refreshToken, TimeValue expiresIn) { @@ -57,12 +62,4 @@ public void writeTo(StreamOutput out) throws IOException { out.writeTimeValue(expiresIn); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - principal = in.readString(); - tokenString = in.readString(); - refreshToken = in.readString(); - expiresIn = in.readTimeValue(); } -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlInvalidateSessionAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlInvalidateSessionAction.java index e6d74e36c794d..f2f39afca7ccb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlInvalidateSessionAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlInvalidateSessionAction.java @@ -5,22 +5,17 @@ */ package org.elasticsearch.xpack.core.security.action.saml; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * ActionType to perform IdP-initiated logout for a SAML-SSO user */ -public final class SamlInvalidateSessionAction extends StreamableResponseActionType { +public final class SamlInvalidateSessionAction extends ActionType { public static final String NAME = "cluster:admin/xpack/security/saml/invalidate"; public static final SamlInvalidateSessionAction INSTANCE = new SamlInvalidateSessionAction(); private SamlInvalidateSessionAction() { - super(NAME); - } - - @Override - public SamlInvalidateSessionResponse newResponse() { - return new SamlInvalidateSessionResponse(); + super(NAME, SamlInvalidateSessionResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlInvalidateSessionRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlInvalidateSessionRequest.java index b2b49db838f31..5d88e6e7dcac7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlInvalidateSessionRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlInvalidateSessionRequest.java @@ -9,6 +9,9 @@ 
import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; import static org.elasticsearch.action.ValidateActions.addValidationError; @@ -25,6 +28,10 @@ public final class SamlInvalidateSessionRequest extends ActionRequest { private String queryString; + public SamlInvalidateSessionRequest(StreamInput in) throws IOException { + super(in); + } + public SamlInvalidateSessionRequest() { } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlInvalidateSessionResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlInvalidateSessionResponse.java index f18631ad63046..af051eb696fdd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlInvalidateSessionResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlInvalidateSessionResponse.java @@ -20,7 +20,11 @@ public final class SamlInvalidateSessionResponse extends ActionResponse { private int count; private String redirectUrl; - public SamlInvalidateSessionResponse() { + public SamlInvalidateSessionResponse(StreamInput in) throws IOException { + super(in); + realmName = in.readString(); + count = in.readInt(); + redirectUrl = in.readString(); } public SamlInvalidateSessionResponse(String realmName, int count, String redirectUrl) { @@ -47,13 +51,4 @@ public void writeTo(StreamOutput out) throws IOException { out.writeInt(count); out.writeString(redirectUrl); } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - realmName = in.readString(); - count = in.readInt(); - redirectUrl = in.readString(); - } - } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlLogoutAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlLogoutAction.java index 2d97a29d190bf..684233b52e3ec 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlLogoutAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlLogoutAction.java @@ -5,22 +5,17 @@ */ package org.elasticsearch.xpack.core.security.action.saml; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * ActionType for initiating a logout process for a SAML-SSO user */ -public final class SamlLogoutAction extends StreamableResponseActionType { +public final class SamlLogoutAction extends ActionType { public static final String NAME = "cluster:admin/xpack/security/saml/logout"; public static final SamlLogoutAction INSTANCE = new SamlLogoutAction(); private SamlLogoutAction() { - super(NAME); - } - - @Override - public SamlLogoutResponse newResponse() { - return new SamlLogoutResponse(); + super(NAME, SamlLogoutResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlLogoutRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlLogoutRequest.java index 45088fdd3d93a..0078cec9a7c87 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlLogoutRequest.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlLogoutRequest.java @@ -9,6 +9,9 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; import static org.elasticsearch.action.ValidateActions.addValidationError; @@ -21,6 +24,10 @@ public final class SamlLogoutRequest extends ActionRequest { @Nullable private String refreshToken; + public SamlLogoutRequest(StreamInput in) throws IOException { + super(in); + } + public SamlLogoutRequest() { } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlLogoutResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlLogoutResponse.java index 95b1227c7d6d9..dc176b6113451 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlLogoutResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlLogoutResponse.java @@ -18,7 +18,9 @@ public final class SamlLogoutResponse extends ActionResponse { private String redirectUrl; - public SamlLogoutResponse() { + public SamlLogoutResponse(StreamInput in) throws IOException { + super(in); + redirectUrl = in.readString(); } public SamlLogoutResponse(String redirectUrl) { @@ -34,10 +36,4 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(redirectUrl); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - redirectUrl = in.readString(); } - -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlPrepareAuthenticationAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlPrepareAuthenticationAction.java index 11e5e38662767..4132acf5c4c6b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlPrepareAuthenticationAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlPrepareAuthenticationAction.java @@ -5,22 +5,17 @@ */ package org.elasticsearch.xpack.core.security.action.saml; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * ActionType for initiating an authentication process using SAML assertions */ -public final class SamlPrepareAuthenticationAction extends StreamableResponseActionType { +public final class SamlPrepareAuthenticationAction extends ActionType { public static final String NAME = "cluster:admin/xpack/security/saml/prepare"; public static final SamlPrepareAuthenticationAction INSTANCE = new SamlPrepareAuthenticationAction(); private SamlPrepareAuthenticationAction() { - super(NAME); - } - - @Override - public SamlPrepareAuthenticationResponse newResponse() { - return new SamlPrepareAuthenticationResponse(); + super(NAME, SamlPrepareAuthenticationResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlPrepareAuthenticationRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlPrepareAuthenticationRequest.java index bd1c59a48b42b..21ec96ca9a253 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlPrepareAuthenticationRequest.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlPrepareAuthenticationRequest.java @@ -5,14 +5,14 @@ */ package org.elasticsearch.xpack.core.security.action.saml; -import java.io.IOException; - import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import java.io.IOException; + /** * Represents a request to prepare a SAML {@code <AuthnRequest>}. */ @@ -24,6 +24,12 @@ public final class SamlPrepareAuthenticationRequest extends ActionRequest { @Nullable private String assertionConsumerServiceURL; + public SamlPrepareAuthenticationRequest(StreamInput in) throws IOException { + super(in); + realmName = in.readOptionalString(); + assertionConsumerServiceURL = in.readOptionalString(); + } + public SamlPrepareAuthenticationRequest() { } @@ -56,13 +62,6 @@ public String toString() { '}'; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - realmName = in.readOptionalString(); - assertionConsumerServiceURL = in.readOptionalString(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlPrepareAuthenticationResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlPrepareAuthenticationResponse.java index 3d25f31210f61..942d7d0b0afef 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlPrepareAuthenticationResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlPrepareAuthenticationResponse.java @@ -20,7 +20,9 @@ public final class SamlPrepareAuthenticationResponse extends ActionResponse { private String requestId; private String redirectUrl; - public SamlPrepareAuthenticationResponse() { + public SamlPrepareAuthenticationResponse(StreamInput in) throws IOException { + super(in); + redirectUrl = in.readString(); } public SamlPrepareAuthenticationResponse(String realmName, String requestId, String redirectUrl) { @@ -46,10 +48,4 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(redirectUrl); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - redirectUrl = in.readString(); } - -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenAction.java index 4ee84f6ccde3f..767c8b3f2a808 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenAction.java @@ -5,22 +5,17 @@ */ package org.elasticsearch.xpack.core.security.action.token; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * ActionType for creating a new token */ -public final class CreateTokenAction extends StreamableResponseActionType { +public final class CreateTokenAction extends ActionType { public static final String NAME = "cluster:admin/xpack/security/token/create"; public static final CreateTokenAction INSTANCE = new 
CreateTokenAction(); private CreateTokenAction() { - super(NAME); - } - - @Override - public CreateTokenResponse newResponse() { - return new CreateTokenResponse(); + super(NAME, CreateTokenResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java index aeae2d8590774..6ff9eae64c06a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java @@ -68,6 +68,16 @@ public static GrantType fromString(String grantType) { private String scope; private String refreshToken; + public CreateTokenRequest(StreamInput in) throws IOException { + super(in); + grantType = in.readString(); + username = in.readOptionalString(); + password = in.readOptionalSecureString(); + refreshToken = in.readOptionalString(); + scope = in.readOptionalString(); + kerberosTicket = in.readOptionalSecureString(); + } + public CreateTokenRequest() {} public CreateTokenRequest(String grantType, @Nullable String username, @Nullable SecureString password, @@ -212,16 +222,4 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(scope); out.writeOptionalSecureString(kerberosTicket); } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - grantType = in.readString(); - username = in.readOptionalString(); - password = in.readOptionalSecureString(); - refreshToken = in.readOptionalString(); - scope = in.readOptionalString(); - kerberosTicket = in.readOptionalSecureString(); - } - } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java index ea72e83d99fd9..9464fef253c2f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java @@ -30,6 +30,15 @@ public final class CreateTokenResponse extends ActionResponse implements ToXCont CreateTokenResponse() {} + public CreateTokenResponse(StreamInput in) throws IOException { + super(in); + tokenString = in.readString(); + expiresIn = in.readTimeValue(); + scope = in.readOptionalString(); + refreshToken = in.readOptionalString(); + kerberosAuthenticationResponseToken = in.readOptionalString(); + } + public CreateTokenResponse(String tokenString, TimeValue expiresIn, String scope, String refreshToken, String kerberosAuthenticationResponseToken) { this.tokenString = Objects.requireNonNull(tokenString); @@ -68,16 +77,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(kerberosAuthenticationResponseToken); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - tokenString = in.readString(); - expiresIn = in.readTimeValue(); - scope = in.readOptionalString(); - refreshToken = in.readOptionalString(); - kerberosAuthenticationResponseToken = in.readOptionalString(); - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject() diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenAction.java index 72082288c2cfd..4e57288efbde6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenAction.java @@ -5,22 +5,17 @@ */ package org.elasticsearch.xpack.core.security.action.token; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * ActionType for invalidating one or more tokens */ -public final class InvalidateTokenAction extends StreamableResponseActionType { +public final class InvalidateTokenAction extends ActionType { public static final String NAME = "cluster:admin/xpack/security/token/invalidate"; public static final InvalidateTokenAction INSTANCE = new InvalidateTokenAction(); private InvalidateTokenAction() { - super(NAME); - } - - @Override - public InvalidateTokenResponse newResponse() { - return new InvalidateTokenResponse(); + super(NAME, InvalidateTokenResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenRequest.java index 43348456d2306..94f3ac7c15e94 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenRequest.java @@ -52,6 +52,15 @@ public static Type fromString(String tokenType) { private String realmName; private String userName; + public InvalidateTokenRequest(StreamInput in) throws IOException { + super(in); + tokenString = in.readOptionalString(); + Integer type = in.readOptionalVInt(); + tokenType = type == null ? null : Type.values()[type]; + realmName = in.readOptionalString(); + userName = in.readOptionalString(); + } + public InvalidateTokenRequest() {} /** @@ -141,14 +150,4 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(realmName); out.writeOptionalString(userName); } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - tokenString = in.readOptionalString(); - Integer type = in.readOptionalVInt(); - tokenType = type == null ? 
null : Type.values()[type]; - realmName = in.readOptionalString(); - userName = in.readOptionalString(); - } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenResponse.java index 7326b61a4b280..09f82f4084c2b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenResponse.java @@ -25,6 +25,11 @@ public final class InvalidateTokenResponse extends ActionResponse implements ToX public InvalidateTokenResponse() {} + public InvalidateTokenResponse(StreamInput in) throws IOException { + super(in); + result = new TokensInvalidationResult(in); + } + public InvalidateTokenResponse(TokensInvalidationResult result) { this.result = result; } @@ -38,12 +43,6 @@ public void writeTo(StreamOutput out) throws IOException { result.writeTo(out); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - result = new TokensInvalidationResult(in); - } - @Override public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { result.toXContent(builder, params); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/RefreshTokenAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/RefreshTokenAction.java index 82dc98d4192c2..1760820fe32e5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/RefreshTokenAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/RefreshTokenAction.java @@ -5,19 +5,14 @@ */ package org.elasticsearch.xpack.core.security.action.token; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public final class RefreshTokenAction extends StreamableResponseActionType { +public final class RefreshTokenAction extends ActionType { public static final String NAME = "cluster:admin/xpack/security/token/refresh"; public static final RefreshTokenAction INSTANCE = new RefreshTokenAction(); private RefreshTokenAction() { - super(NAME); - } - - @Override - public CreateTokenResponse newResponse() { - return new CreateTokenResponse(); + super(NAME, CreateTokenResponse::new); } } \ No newline at end of file diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateAction.java index 606a32803c455..27d308729f55f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateAction.java @@ -5,19 +5,14 @@ */ package org.elasticsearch.xpack.core.security.action.user; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class AuthenticateAction extends StreamableResponseActionType { +public class AuthenticateAction extends ActionType { public static final String NAME = "cluster:admin/xpack/security/user/authenticate"; public static final AuthenticateAction INSTANCE = new 
AuthenticateAction(); public AuthenticateAction() { - super(NAME); - } - - @Override - public AuthenticateResponse newResponse() { - return new AuthenticateResponse(); + super(NAME, AuthenticateResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateRequest.java index 1b1b5d8db6ca8..17a38263e8f15 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateRequest.java @@ -16,6 +16,11 @@ public class AuthenticateRequest extends ActionRequest implements UserRequest { private String username; + public AuthenticateRequest(StreamInput in) throws IOException { + super(in); + username = in.readString(); + } + public AuthenticateRequest() {} public AuthenticateRequest(String username) { @@ -41,12 +46,6 @@ public String[] usernames() { return new String[] { username }; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - username = in.readString(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java index 3e5f819292bcb..36faef7569794 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java @@ -16,7 +16,10 @@ public class AuthenticateResponse extends ActionResponse { private Authentication authentication; - public AuthenticateResponse() {} + public AuthenticateResponse(StreamInput in) throws IOException { + super(in); + authentication = new Authentication(in); + } public AuthenticateResponse(Authentication authentication){ this.authentication = authentication; @@ -31,10 +34,4 @@ public void writeTo(StreamOutput out) throws IOException { authentication.writeTo(out); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - authentication = new Authentication(in); } - -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordAction.java index 981d9cb67ec83..e72a969f2403e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordAction.java @@ -5,19 +5,14 @@ */ package org.elasticsearch.xpack.core.security.action.user; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class ChangePasswordAction extends StreamableResponseActionType { +public class ChangePasswordAction extends ActionType { public static final ChangePasswordAction INSTANCE = new ChangePasswordAction(); public static final String NAME = "cluster:admin/xpack/security/user/change_password"; protected ChangePasswordAction() { - super(NAME); - } - - @Override - public 
ChangePasswordResponse newResponse() { - return new ChangePasswordResponse(); + super(NAME, ChangePasswordResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordRequest.java index b78b81c060080..031448bbff766 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordRequest.java @@ -28,6 +28,15 @@ public class ChangePasswordRequest extends ActionRequest private char[] passwordHash; private RefreshPolicy refreshPolicy = RefreshPolicy.IMMEDIATE; + public ChangePasswordRequest() {} + + public ChangePasswordRequest(StreamInput in) throws IOException { + super(in); + username = in.readString(); + passwordHash = CharArrays.utf8BytesToChars(BytesReference.toBytes(in.readBytesReference())); + refreshPolicy = RefreshPolicy.readFrom(in); + } + @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; @@ -76,14 +85,6 @@ public String[] usernames() { return new String[] { username }; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - username = in.readString(); - passwordHash = CharArrays.utf8BytesToChars(BytesReference.toBytes(in.readBytesReference())); - refreshPolicy = RefreshPolicy.readFrom(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordResponse.java index 20240d5be9284..591fbd98a3a07 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordResponse.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.security.action.user; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; @@ -14,6 +15,10 @@ public class ChangePasswordResponse extends ActionResponse { public ChangePasswordResponse() {} + public ChangePasswordResponse(StreamInput in) throws IOException { + super(in); + } + @Override public void writeTo(StreamOutput out) throws IOException {} } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/DeleteUserAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/DeleteUserAction.java index e9b693a7521b9..62eb50a288b88 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/DeleteUserAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/DeleteUserAction.java @@ -5,22 +5,17 @@ */ package org.elasticsearch.xpack.core.security.action.user; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * ActionType for deleting a native user. 
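ChangePasswordRequest above deliberately moves the password hash over the wire as raw UTF-8 bytes and rebuilds a char[] via CharArrays.utf8BytesToChars, so the secret never becomes an interned String and can be zeroed after use. CharArrays is an Elasticsearch helper; a JDK-only approximation of the conversion it performs might look like this:

import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

final class SecretBytes {
    // Decode UTF-8 bytes to a char[] without ever going through String.
    static char[] utf8BytesToChars(byte[] utf8) {
        CharBuffer decoded = StandardCharsets.UTF_8.decode(ByteBuffer.wrap(utf8));
        char[] chars = Arrays.copyOf(decoded.array(), decoded.limit());
        Arrays.fill(decoded.array(), '\0'); // scrub the intermediate buffer
        return chars;
    }

    public static void main(String[] args) {
        char[] hash = utf8BytesToChars("bcrypt$2a$10$...".getBytes(StandardCharsets.UTF_8));
        System.out.println(hash.length);
        Arrays.fill(hash, '\0'); // callers can wipe the secret when done
    }
}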
*/ -public class DeleteUserAction extends StreamableResponseActionType { +public class DeleteUserAction extends ActionType { public static final DeleteUserAction INSTANCE = new DeleteUserAction(); public static final String NAME = "cluster:admin/xpack/security/user/delete"; protected DeleteUserAction() { - super(NAME); - } - - @Override - public DeleteUserResponse newResponse() { - return new DeleteUserResponse(); + super(NAME, DeleteUserResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/DeleteUserRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/DeleteUserRequest.java index 6587576b515b3..edca26060c7e0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/DeleteUserRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/DeleteUserRequest.java @@ -23,6 +23,12 @@ public class DeleteUserRequest extends ActionRequest implements UserRequest, Wri private String username; private RefreshPolicy refreshPolicy = RefreshPolicy.IMMEDIATE; + public DeleteUserRequest(StreamInput in) throws IOException { + super(in); + username = in.readString(); + refreshPolicy = RefreshPolicy.readFrom(in); + } + public DeleteUserRequest() { } @@ -63,13 +69,6 @@ public String[] usernames() { return new String[] { username }; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - username = in.readString(); - refreshPolicy = RefreshPolicy.readFrom(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/DeleteUserResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/DeleteUserResponse.java index 2f2f33800238f..616d47de068aa 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/DeleteUserResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/DeleteUserResponse.java @@ -21,7 +21,9 @@ public class DeleteUserResponse extends ActionResponse implements ToXContentObje private boolean found; - public DeleteUserResponse() { + public DeleteUserResponse(StreamInput in) throws IOException { + super(in); + found = in.readBoolean(); } public DeleteUserResponse(boolean found) { @@ -38,12 +40,6 @@ public boolean found() { return this.found; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - found = in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(found); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesAction.java index ca5327dcc76f4..59b065351302e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesAction.java @@ -5,22 +5,17 @@ */ package org.elasticsearch.xpack.core.security.action.user; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * ActionType that lists the set of privileges held by a user. 
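DeleteUserAction above, like every action converted in this patch, now hands its ActionType super-constructor a constructor reference such as DeleteUserResponse::new. This compiles because a constructor that accepts the stream is an exact match for a reader functional interface, which the transport layer invokes once per inbound response, replacing the old newResponse()-then-readFrom() two-step of StreamableResponseActionType. A simplified stand-alone model of that mechanism (the interface and class names below are stand-ins for Writeable.Reader and ActionType, not the real API):

import java.io.DataInputStream;
import java.io.IOException;

// The functional shape that makes `super(NAME, DeleteUserResponse::new)` compile:
// a one-argument constructor is an exact match for this method signature.
interface StreamReader<T> {
    T read(DataInputStream in) throws IOException;
}

class ActionTypeSketch<Response> {
    private final String name;
    private final StreamReader<Response> responseReader;

    ActionTypeSketch(String name, StreamReader<Response> responseReader) {
        this.name = name;
        this.responseReader = responseReader;
    }

    String name() {
        return name;
    }

    // Called once per inbound response; replaces the old two-step of
    // newResponse() followed by readFrom(in).
    Response readResponse(DataInputStream in) throws IOException {
        return responseReader.read(in);
    }
}

final class DeleteUserResponseSketch {
    final boolean found;

    DeleteUserResponseSketch(DataInputStream in) throws IOException {
        this.found = in.readBoolean();
    }
}

final class DeleteUserActionSketch extends ActionTypeSketch<DeleteUserResponseSketch> {
    DeleteUserActionSketch() {
        super("cluster:admin/xpack/security/user/delete", DeleteUserResponseSketch::new);
    }
}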
*/ -public final class GetUserPrivilegesAction extends StreamableResponseActionType { +public final class GetUserPrivilegesAction extends ActionType { public static final GetUserPrivilegesAction INSTANCE = new GetUserPrivilegesAction(); public static final String NAME = "cluster:admin/xpack/security/user/list_privileges"; private GetUserPrivilegesAction() { - super(NAME); - } - - @Override - public GetUserPrivilegesResponse newResponse() { - return new GetUserPrivilegesResponse(); + super(NAME, GetUserPrivilegesResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesRequest.java index 972e881cc38ea..490c7c72369f1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesRequest.java @@ -54,16 +54,6 @@ public String[] usernames() { return new String[] { username }; } - /** - * Always throws {@link UnsupportedOperationException} as this object should be deserialized using - * the {@link #GetUserPrivilegesRequest(StreamInput)} constructor instead. - */ - @Override - @Deprecated - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("Use " + getClass() + " as Writeable not Streamable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponse.java index af83e5cca455e..f3b7d43cda280 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponse.java @@ -37,8 +37,13 @@ public final class GetUserPrivilegesResponse extends ActionResponse { private Set application; private Set runAs; - public GetUserPrivilegesResponse() { - this(Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), Collections.emptySet()); + public GetUserPrivilegesResponse(StreamInput in) throws IOException { + super(in); + cluster = Collections.unmodifiableSet(in.readSet(StreamInput::readString)); + conditionalCluster = Collections.unmodifiableSet(in.readSet(ConditionalClusterPrivileges.READER)); + index = Collections.unmodifiableSet(in.readSet(Indices::new)); + application = Collections.unmodifiableSet(in.readSet(RoleDescriptor.ApplicationResourcePrivileges::new)); + runAs = Collections.unmodifiableSet(in.readSet(StreamInput::readString)); } public GetUserPrivilegesResponse(Set cluster, Set conditionalCluster, @@ -72,15 +77,6 @@ public Set getRunAs() { return runAs; } - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - cluster = Collections.unmodifiableSet(in.readSet(StreamInput::readString)); - conditionalCluster = Collections.unmodifiableSet(in.readSet(ConditionalClusterPrivileges.READER)); - index = Collections.unmodifiableSet(in.readSet(Indices::new)); - application = Collections.unmodifiableSet(in.readSet(RoleDescriptor.ApplicationResourcePrivileges::new)); - runAs = 
Collections.unmodifiableSet(in.readSet(StreamInput::readString)); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeCollection(cluster, StreamOutput::writeString); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersAction.java index 55479c220c100..85b21723a9367 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersAction.java @@ -5,22 +5,17 @@ */ package org.elasticsearch.xpack.core.security.action.user; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * ActionType for retrieving a user from the security index */ -public class GetUsersAction extends StreamableResponseActionType { +public class GetUsersAction extends ActionType { public static final GetUsersAction INSTANCE = new GetUsersAction(); public static final String NAME = "cluster:admin/xpack/security/user/get"; protected GetUsersAction() { - super(NAME); - } - - @Override - public GetUsersResponse newResponse() { - return new GetUsersResponse(); + super(NAME, GetUsersResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersRequest.java index 3ed0f798b371c..ade97805e790f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersRequest.java @@ -22,6 +22,11 @@ public class GetUsersRequest extends ActionRequest implements UserRequest { private String[] usernames; + public GetUsersRequest(StreamInput in) throws IOException { + super(in); + usernames = in.readStringArray(); + } + public GetUsersRequest() { usernames = Strings.EMPTY_ARRAY; } @@ -44,12 +49,6 @@ public String[] usernames() { return usernames; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - usernames = in.readStringArray(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersResponse.java index ac3d63a551ffe..62cc7e26416d4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersResponse.java @@ -20,6 +20,19 @@ public class GetUsersResponse extends ActionResponse { private User[] users; + public GetUsersResponse(StreamInput in) throws IOException { + super(in); + int size = in.readVInt(); + if (size < 0) { + users = null; + } else { + users = new User[size]; + for (int i = 0; i < size; i++) { + users[i] = User.readFrom(in); + } + } + } + public GetUsersResponse(User... 
users) { this.users = users; } @@ -36,20 +49,6 @@ public boolean hasUsers() { return users != null && users.length > 0; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - int size = in.readVInt(); - if (size < 0) { - users = null; - } else { - users = new User[size]; - for (int i = 0; i < size; i++) { - users[i] = User.readFrom(in); - } - } - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(users == null ? -1 : users.length); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesAction.java index 8d05574d295ef..bc17be961b817 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesAction.java @@ -5,24 +5,19 @@ */ package org.elasticsearch.xpack.core.security.action.user; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; /** * This action is testing whether a user has the specified * {@link RoleDescriptor.IndicesPrivileges privileges} */ -public class HasPrivilegesAction extends StreamableResponseActionType { +public class HasPrivilegesAction extends ActionType { public static final HasPrivilegesAction INSTANCE = new HasPrivilegesAction(); public static final String NAME = "cluster:admin/xpack/security/user/has_privileges"; private HasPrivilegesAction() { - super(NAME); - } - - @Override - public HasPrivilegesResponse newResponse() { - return new HasPrivilegesResponse(); + super(NAME, HasPrivilegesResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequest.java index 93ac7ff45dd66..2e65ef14abfdc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequest.java @@ -27,6 +27,20 @@ public class HasPrivilegesRequest extends ActionRequest implements UserRequest { private RoleDescriptor.IndicesPrivileges[] indexPrivileges; private ApplicationResourcePrivileges[] applicationPrivileges; + public HasPrivilegesRequest() {} + + public HasPrivilegesRequest(StreamInput in) throws IOException { + super(in); + this.username = in.readString(); + this.clusterPrivileges = in.readStringArray(); + int indexSize = in.readVInt(); + indexPrivileges = new RoleDescriptor.IndicesPrivileges[indexSize]; + for (int i = 0; i < indexSize; i++) { + indexPrivileges[i] = new RoleDescriptor.IndicesPrivileges(in); + } + applicationPrivileges = in.readArray(ApplicationResourcePrivileges::new, ApplicationResourcePrivileges[]::new); + } + @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; @@ -98,19 +112,6 @@ public void applicationPrivileges(ApplicationResourcePrivileges... 
appPrivileges this.applicationPrivileges = appPrivileges; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - this.username = in.readString(); - this.clusterPrivileges = in.readStringArray(); - int indexSize = in.readVInt(); - indexPrivileges = new RoleDescriptor.IndicesPrivileges[indexSize]; - for (int i = 0; i < indexSize; i++) { - indexPrivileges[i] = new RoleDescriptor.IndicesPrivileges(in); - } - applicationPrivileges = in.readArray(ApplicationResourcePrivileges::new, ApplicationResourcePrivileges[]::new); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponse.java index 1bf7c4285a52d..f2b11410f3b1c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponse.java @@ -36,6 +36,15 @@ public HasPrivilegesResponse() { this("", true, Collections.emptyMap(), Collections.emptyList(), Collections.emptyMap()); } + public HasPrivilegesResponse(StreamInput in) throws IOException { + super(in); + completeMatch = in.readBoolean(); + cluster = in.readMap(StreamInput::readString, StreamInput::readBoolean); + index = readResourcePrivileges(in); + application = in.readMap(StreamInput::readString, HasPrivilegesResponse::readResourcePrivileges); + username = in.readString(); + } + public HasPrivilegesResponse(String username, boolean completeMatch, Map<String, Boolean> cluster, Collection<ResourcePrivileges> index, Map<String, Collection<ResourcePrivileges>> application) { super(); @@ -99,15 +108,6 @@ public int hashCode() { return Objects.hash(username, completeMatch, cluster, index, application); } - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - completeMatch = in.readBoolean(); - cluster = in.readMap(StreamInput::readString, StreamInput::readBoolean); - index = readResourcePrivileges(in); - application = in.readMap(StreamInput::readString, HasPrivilegesResponse::readResourcePrivileges); - username = in.readString(); - } - private static Set<ResourcePrivileges> readResourcePrivileges(StreamInput in) throws IOException { final int count = in.readVInt(); final Set<ResourcePrivileges> set = new TreeSet<>(Comparator.comparing(o -> o.getResource())); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserAction.java index 7d4d66c4ea3cb..04af0e3e5c0af 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserAction.java @@ -5,22 +5,17 @@ */ package org.elasticsearch.xpack.core.security.action.user; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * ActionType for putting (adding/updating) a native user. 
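GetUsersResponse above keeps the distinction between a null users array and an empty one across the wire by writing -1 as a length sentinel, and its new deserializing constructor mirrors that check (size < 0) on the read side. A stand-alone sketch of the same nullable-array protocol in plain java.io (illustrative, not the StreamInput API):

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

final class NullableArrayWire {
    // null and empty arrays are different answers to a get-users call, so the
    // length doubles as a presence sentinel: -1 means "no array at all".
    static void write(DataOutputStream out, String[] values) throws IOException {
        out.writeInt(values == null ? -1 : values.length);
        if (values != null) {
            for (String value : values) {
                out.writeUTF(value);
            }
        }
    }

    static String[] read(DataInputStream in) throws IOException {
        int size = in.readInt();
        if (size < 0) {
            return null; // mirrors `if (size < 0) { users = null; }` above
        }
        String[] values = new String[size];
        for (int i = 0; i < size; i++) {
            values[i] = in.readUTF();
        }
        return values;
    }
}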
*/ -public class PutUserAction extends StreamableResponseActionType { +public class PutUserAction extends ActionType { public static final PutUserAction INSTANCE = new PutUserAction(); public static final String NAME = "cluster:admin/xpack/security/user/put"; protected PutUserAction() { - super(NAME); - } - - @Override - public PutUserResponse newResponse() { - return new PutUserResponse(); + super(NAME, PutUserResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequest.java index d09b168e9669b..578793661f47a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequest.java @@ -36,6 +36,18 @@ public class PutUserRequest extends ActionRequest implements UserRequest, WriteR private boolean enabled = true; private RefreshPolicy refreshPolicy = RefreshPolicy.IMMEDIATE; + public PutUserRequest(StreamInput in) throws IOException { + super(in); + username = in.readString(); + passwordHash = readCharArrayFromStream(in); + roles = in.readStringArray(); + fullName = in.readOptionalString(); + email = in.readOptionalString(); + metadata = in.readBoolean() ? in.readMap() : null; + refreshPolicy = RefreshPolicy.readFrom(in); + enabled = in.readBoolean(); + } + public PutUserRequest() { } @@ -132,19 +144,6 @@ public String[] usernames() { return new String[] { username }; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - username = in.readString(); - passwordHash = readCharArrayFromStream(in); - roles = in.readStringArray(); - fullName = in.readOptionalString(); - email = in.readOptionalString(); - metadata = in.readBoolean() ? 
in.readMap() : null; - refreshPolicy = RefreshPolicy.readFrom(in); - enabled = in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserResponse.java index f0cea14c90de5..0e5866f61fb5a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserResponse.java @@ -22,7 +22,9 @@ public class PutUserResponse extends ActionResponse implements ToXContentObject private boolean created; - public PutUserResponse() { + public PutUserResponse(StreamInput in) throws IOException { + super(in); + this.created = in.readBoolean(); } public PutUserResponse(boolean created) { @@ -38,12 +40,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(created); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - this.created = in.readBoolean(); - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { return builder.startObject() diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/SetEnabledAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/SetEnabledAction.java index 6073ba131d1b1..9cb6d8f9626f4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/SetEnabledAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/SetEnabledAction.java @@ -5,22 +5,17 @@ */ package org.elasticsearch.xpack.core.security.action.user; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * This action is for setting the enabled flag on a native or reserved user */ -public class SetEnabledAction extends StreamableResponseActionType { +public class SetEnabledAction extends ActionType { public static final SetEnabledAction INSTANCE = new SetEnabledAction(); public static final String NAME = "cluster:admin/xpack/security/user/set_enabled"; private SetEnabledAction() { - super(NAME); - } - - @Override - public SetEnabledResponse newResponse() { - return new SetEnabledResponse(); + super(NAME, SetEnabledResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/SetEnabledRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/SetEnabledRequest.java index 664a46ae3e727..28e161dd8b372 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/SetEnabledRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/SetEnabledRequest.java @@ -27,6 +27,15 @@ public class SetEnabledRequest extends ActionRequest implements UserRequest, Wri private String username; private RefreshPolicy refreshPolicy = RefreshPolicy.IMMEDIATE; + public SetEnabledRequest() {} + + public SetEnabledRequest(StreamInput in) throws IOException { + super(in); + this.enabled = in.readBoolean(); + this.username = in.readString(); + this.refreshPolicy = RefreshPolicy.readFrom(in); + } + @Override public ActionRequestValidationException 
validate() { ActionRequestValidationException validationException = null; @@ -88,14 +97,6 @@ public SetEnabledRequest setRefreshPolicy(RefreshPolicy refreshPolicy) { return this; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - this.enabled = in.readBoolean(); - this.username = in.readString(); - this.refreshPolicy = RefreshPolicy.readFrom(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/SetEnabledResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/SetEnabledResponse.java index 056305955dceb..fdb6a6a8aaa13 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/SetEnabledResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/SetEnabledResponse.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.security.action.user; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; @@ -14,6 +15,12 @@ * Empty response for a {@link SetEnabledRequest} */ public class SetEnabledResponse extends ActionResponse { + + public SetEnabledResponse() {} + + public SetEnabledResponse(StreamInput in) throws IOException { + super(in); + } @Override public void writeTo(StreamOutput out) throws IOException {} } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilege.java index cae2f223bf6f0..452b1ba4d53d1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilege.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilege.java @@ -14,10 +14,13 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.xpack.core.indexlifecycle.action.GetLifecycleAction; import org.elasticsearch.xpack.core.indexlifecycle.action.GetStatusAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.StartILMAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.StopILMAction; import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenAction; import org.elasticsearch.xpack.core.security.action.token.RefreshTokenAction; import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesAction; import org.elasticsearch.xpack.core.security.support.Automatons; +import org.elasticsearch.xpack.core.snapshotlifecycle.action.GetSnapshotLifecycleAction; import java.util.Collections; import java.util.HashSet; @@ -61,6 +64,9 @@ public final class ClusterPrivilege extends Privilege { private static final Automaton READ_CCR_AUTOMATON = patterns(ClusterStateAction.NAME, HasPrivilegesAction.NAME); private static final Automaton MANAGE_ILM_AUTOMATON = patterns("cluster:admin/ilm/*"); private static final Automaton READ_ILM_AUTOMATON = patterns(GetLifecycleAction.NAME, GetStatusAction.NAME); + private static final Automaton MANAGE_SLM_AUTOMATON = + patterns("cluster:admin/slm/*", StartILMAction.NAME, StopILMAction.NAME, GetStatusAction.NAME); + private static final Automaton READ_SLM_AUTOMATON = patterns(GetSnapshotLifecycleAction.NAME, GetStatusAction.NAME); public 
static final ClusterPrivilege NONE = new ClusterPrivilege("none", Automatons.EMPTY); public static final ClusterPrivilege ALL = new ClusterPrivilege("all", ALL_CLUSTER_AUTOMATON); @@ -92,6 +98,8 @@ public final class ClusterPrivilege extends Privilege { public static final ClusterPrivilege CREATE_SNAPSHOT = new ClusterPrivilege("create_snapshot", CREATE_SNAPSHOT_AUTOMATON); public static final ClusterPrivilege MANAGE_ILM = new ClusterPrivilege("manage_ilm", MANAGE_ILM_AUTOMATON); public static final ClusterPrivilege READ_ILM = new ClusterPrivilege("read_ilm", READ_ILM_AUTOMATON); + public static final ClusterPrivilege MANAGE_SLM = new ClusterPrivilege("manage_slm", MANAGE_SLM_AUTOMATON); + public static final ClusterPrivilege READ_SLM = new ClusterPrivilege("read_slm", READ_SLM_AUTOMATON); public static final Predicate ACTION_MATCHER = ClusterPrivilege.ALL.predicate(); @@ -121,7 +129,9 @@ public final class ClusterPrivilege extends Privilege { entry("read_ccr", READ_CCR), entry("create_snapshot", CREATE_SNAPSHOT), entry("manage_ilm", MANAGE_ILM), - entry("read_ilm", READ_ILM)); + entry("read_ilm", READ_ILM), + entry("manage_slm", MANAGE_SLM), + entry("read_slm", READ_SLM)); private static final ConcurrentHashMap, ClusterPrivilege> CACHE = new ConcurrentHashMap<>(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/netty4/SecurityNetty4Transport.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/netty4/SecurityNetty4Transport.java index 6e2b9c1a7efdd..624b90125b0db 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/netty4/SecurityNetty4Transport.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/netty4/SecurityNetty4Transport.java @@ -131,6 +131,11 @@ protected ServerChannelInitializer getSslChannelInitializer(final String name, f return new SslChannelInitializer(name, sslConfiguration); } + @Override + public boolean isSecure() { + return this.sslEnabled; + } + private class SecurityClientChannelInitializer extends ClientChannelInitializer { private final boolean hostnameVerificationEnabled; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/SnapshotInvocationRecord.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/SnapshotInvocationRecord.java new file mode 100644 index 0000000000000..a39153f991664 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/SnapshotInvocationRecord.java @@ -0,0 +1,111 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.snapshotlifecycle; + +import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.cluster.Diffable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +/** + * Holds information about Snapshots kicked off by Snapshot Lifecycle Management in the cluster state, so that this information can be + * presented to the user. This class is used for both successes and failures as the structure of the data is very similar. + */ +public class SnapshotInvocationRecord extends AbstractDiffable + implements Writeable, ToXContentObject, Diffable { + + static final ParseField SNAPSHOT_NAME = new ParseField("snapshot_name"); + static final ParseField TIMESTAMP = new ParseField("time"); + static final ParseField DETAILS = new ParseField("details"); + + private String snapshotName; + private long timestamp; + private String details; + + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("snapshot_policy_invocation_record", true, + a -> new SnapshotInvocationRecord((String) a[0], (long) a[1], (String) a[2])); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), SNAPSHOT_NAME); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), TIMESTAMP); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), DETAILS); + } + + public static SnapshotInvocationRecord parse(XContentParser parser, String name) { + return PARSER.apply(parser, name); + } + + public SnapshotInvocationRecord(String snapshotName, long timestamp, String details) { + this.snapshotName = Objects.requireNonNull(snapshotName, "snapshot name must be provided"); + this.timestamp = timestamp; + this.details = details; + } + + public SnapshotInvocationRecord(StreamInput in) throws IOException { + this.snapshotName = in.readString(); + this.timestamp = in.readVLong(); + this.details = in.readOptionalString(); + } + + public String getSnapshotName() { + return snapshotName; + } + + public long getTimestamp() { + return timestamp; + } + + public String getDetails() { + return details; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(snapshotName); + out.writeVLong(timestamp); + out.writeOptionalString(details); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field(SNAPSHOT_NAME.getPreferredName(), snapshotName); + builder.timeField(TIMESTAMP.getPreferredName(), "time_string", timestamp); + if (Objects.nonNull(details)) { + builder.field(DETAILS.getPreferredName(), details); + } + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SnapshotInvocationRecord that = (SnapshotInvocationRecord) o; + return getTimestamp() == that.getTimestamp() && + Objects.equals(getSnapshotName(), that.getSnapshotName()) && + Objects.equals(getDetails(), that.getDetails()); + } + + 
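As an editorial aside on the record class above: it deliberately carries only three fields, and a short sketch makes the two intended flavors clear. This is illustrative only, not part of the change; the snapshot names and failure message below are made up.

    // Success: details are left null; only the resolved snapshot name and time are kept.
    SnapshotInvocationRecord success = new SnapshotInvocationRecord(
        "nightly-snap-2019.07.09-xyz", System.currentTimeMillis(), null);

    // Failure: details carry a human-readable explanation of what went wrong.
    SnapshotInvocationRecord failure = new SnapshotInvocationRecord(
        "nightly-snap-2019.07.09-abc", System.currentTimeMillis(),
        "snapshot failed: repository [my-repo] is missing");

Both flavors serialize identically; since details is written with writeOptionalString, the success case costs only a single marker byte on the wire.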
@Override + public int hashCode() { + return Objects.hash(getSnapshotName(), getTimestamp(), getDetails()); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/SnapshotLifecycleMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/SnapshotLifecycleMetadata.java new file mode 100644 index 0000000000000..542014b46dbe7 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/SnapshotLifecycleMetadata.java @@ -0,0 +1,178 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.snapshotlifecycle; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.DiffableUtils; +import org.elasticsearch.cluster.NamedDiff; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.XPackPlugin.XPackMetaDataCustom; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; + +import java.io.IOException; +import java.util.Collections; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.TreeMap; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * Custom cluster state metadata that stores all the snapshot lifecycle + * policies and their associated metadata + */ +public class SnapshotLifecycleMetadata implements XPackMetaDataCustom { + + public static final String TYPE = "snapshot_lifecycle"; + public static final ParseField OPERATION_MODE_FIELD = new ParseField("operation_mode"); + public static final ParseField POLICIES_FIELD = new ParseField("policies"); + + public static final SnapshotLifecycleMetadata EMPTY = new SnapshotLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(TYPE, + a -> new SnapshotLifecycleMetadata( + ((List) a[0]).stream() + .collect(Collectors.toMap(m -> m.getPolicy().getId(), Function.identity())), + OperationMode.valueOf((String) a[1]))); + + static { + PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(), (p, c, n) -> SnapshotLifecyclePolicyMetadata.parse(p, n), + v -> { + throw new IllegalArgumentException("ordered " + POLICIES_FIELD.getPreferredName() + " are not supported"); + }, POLICIES_FIELD); + } + + private final Map snapshotConfigurations; + private final OperationMode operationMode; + + public SnapshotLifecycleMetadata(Map snapshotConfigurations, OperationMode operationMode) { + this.snapshotConfigurations = new HashMap<>(snapshotConfigurations); + this.operationMode = operationMode; + } + + public SnapshotLifecycleMetadata(StreamInput in) throws IOException { + this.snapshotConfigurations = in.readMap(StreamInput::readString, SnapshotLifecyclePolicyMetadata::new); + 
this.operationMode = in.readEnum(OperationMode.class); + } + + public Map getSnapshotConfigurations() { + return Collections.unmodifiableMap(this.snapshotConfigurations); + } + + public OperationMode getOperationMode() { + return operationMode; + } + + @Override + public EnumSet context() { + return MetaData.ALL_CONTEXTS; + } + + @Override + public Diff diff(MetaData.Custom previousState) { + return new SnapshotLifecycleMetadataDiff((SnapshotLifecycleMetadata) previousState, this); + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.V_7_4_0; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(this.snapshotConfigurations, StreamOutput::writeString, (out1, value) -> value.writeTo(out1)); + out.writeEnum(this.operationMode); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(POLICIES_FIELD.getPreferredName(), this.snapshotConfigurations); + builder.field(OPERATION_MODE_FIELD.getPreferredName(), operationMode); + return builder; + } + + @Override + public String toString() { + return Strings.toString(this); + } + + @Override + public int hashCode() { + return Objects.hash(this.snapshotConfigurations, this.operationMode); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + SnapshotLifecycleMetadata other = (SnapshotLifecycleMetadata) obj; + return this.snapshotConfigurations.equals(other.snapshotConfigurations) && + this.operationMode.equals(other.operationMode); + } + + public static class SnapshotLifecycleMetadataDiff implements NamedDiff { + + final Diff> lifecycles; + final OperationMode operationMode; + + SnapshotLifecycleMetadataDiff(SnapshotLifecycleMetadata before, SnapshotLifecycleMetadata after) { + this.lifecycles = DiffableUtils.diff(before.snapshotConfigurations, after.snapshotConfigurations, + DiffableUtils.getStringKeySerializer()); + this.operationMode = after.operationMode; + } + + public SnapshotLifecycleMetadataDiff(StreamInput in) throws IOException { + this.lifecycles = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), + SnapshotLifecyclePolicyMetadata::new, + SnapshotLifecycleMetadataDiff::readLifecyclePolicyDiffFrom); + this.operationMode = in.readEnum(OperationMode.class); + } + + @Override + public MetaData.Custom apply(MetaData.Custom part) { + TreeMap newLifecycles = new TreeMap<>( + lifecycles.apply(((SnapshotLifecycleMetadata) part).snapshotConfigurations)); + return new SnapshotLifecycleMetadata(newLifecycles, this.operationMode); + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + lifecycles.writeTo(out); + out.writeEnum(this.operationMode); + } + + static Diff readLifecyclePolicyDiffFrom(StreamInput in) throws IOException { + return AbstractDiffable.readDiffFrom(SnapshotLifecyclePolicyMetadata::new, in); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/SnapshotLifecyclePolicy.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/SnapshotLifecyclePolicy.java new file mode 100644 index 0000000000000..e1d760702bde3 --- /dev/null +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/SnapshotLifecyclePolicy.java @@ -0,0 +1,328 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.snapshotlifecycle; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.Diffable; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.Context; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.scheduler.Cron; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.cluster.metadata.MetaDataCreateIndexService.MAX_INDEX_NAME_BYTES; + +/** + * A {@code SnapshotLifecyclePolicy} is a policy for the cluster including a schedule of when a + * snapshot should be triggered, what the snapshot should be named, what repository it should go + * to, and the configuration for the snapshot itself. 
+ */ +public class SnapshotLifecyclePolicy extends AbstractDiffable + implements Writeable, Diffable, ToXContentObject { + + private final String id; + private final String name; + private final String schedule; + private final String repository; + private final Map configuration; + + private static final ParseField NAME = new ParseField("name"); + private static final ParseField SCHEDULE = new ParseField("schedule"); + private static final ParseField REPOSITORY = new ParseField("repository"); + private static final ParseField CONFIG = new ParseField("config"); + private static final IndexNameExpressionResolver.DateMathExpressionResolver DATE_MATH_RESOLVER = + new IndexNameExpressionResolver.DateMathExpressionResolver(); + private static final String POLICY_ID_METADATA_FIELD = "policy"; + private static final String METADATA_FIELD_NAME = "metadata"; + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("snapshot_lifecycle", true, + (a, id) -> { + String name = (String) a[0]; + String schedule = (String) a[1]; + String repo = (String) a[2]; + Map config = (Map) a[3]; + return new SnapshotLifecyclePolicy(id, name, schedule, repo, config); + }); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), NAME); + PARSER.declareString(ConstructingObjectParser.constructorArg(), SCHEDULE); + PARSER.declareString(ConstructingObjectParser.constructorArg(), REPOSITORY); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> p.map(), CONFIG); + } + + public SnapshotLifecyclePolicy(final String id, final String name, final String schedule, + final String repository, @Nullable Map configuration) { + this.id = Objects.requireNonNull(id, "policy id is required"); + this.name = Objects.requireNonNull(name, "policy snapshot name is required"); + this.schedule = Objects.requireNonNull(schedule, "policy schedule is required"); + this.repository = Objects.requireNonNull(repository, "policy snapshot repository is required"); + this.configuration = configuration; + } + + public SnapshotLifecyclePolicy(StreamInput in) throws IOException { + this.id = in.readString(); + this.name = in.readString(); + this.schedule = in.readString(); + this.repository = in.readString(); + this.configuration = in.readMap(); + } + + public String getId() { + return this.id; + } + + public String getName() { + return this.name; + } + + public String getSchedule() { + return this.schedule; + } + + public String getRepository() { + return this.repository; + } + + @Nullable + public Map getConfig() { + return this.configuration; + } + + public long calculateNextExecution() { + final Cron schedule = new Cron(this.schedule); + return schedule.getNextValidTimeAfter(System.currentTimeMillis()); + } + + public ActionRequestValidationException validate() { + ActionRequestValidationException err = new ActionRequestValidationException(); + + // ID validation + if (id.contains(",")) { + err.addValidationError("invalid policy id [" + id + "]: must not contain ','"); + } + if (id.contains(" ")) { + err.addValidationError("invalid policy id [" + id + "]: must not contain spaces"); + } + if (id.charAt(0) == '_') { + err.addValidationError("invalid policy id [" + id + "]: must not start with '_'"); + } + int byteCount = id.getBytes(StandardCharsets.UTF_8).length; + if (byteCount > MAX_INDEX_NAME_BYTES) { + err.addValidationError("invalid policy id [" + id + "]: name is too long, (" + byteCount + " > " + + MAX_INDEX_NAME_BYTES + " 
bytes)"); + } + + // Snapshot name validation + // We generate a snapshot name here to make sure it validates after applying date math + final String snapshotName = generateSnapshotName(new ResolverContext()); + if (Strings.hasText(name) == false) { + err.addValidationError("invalid snapshot name [" + name + "]: cannot be empty"); + } + if (snapshotName.contains("#")) { + err.addValidationError("invalid snapshot name [" + name + "]: must not contain '#'"); + } + if (snapshotName.charAt(0) == '_') { + err.addValidationError("invalid snapshot name [" + name + "]: must not start with '_'"); + } + if (snapshotName.toLowerCase(Locale.ROOT).equals(snapshotName) == false) { + err.addValidationError("invalid snapshot name [" + name + "]: must be lowercase"); + } + if (Strings.validFileName(snapshotName) == false) { + err.addValidationError("invalid snapshot name [" + name + "]: must not contain contain the following characters " + + Strings.INVALID_FILENAME_CHARS); + } + + // Schedule validation + if (Strings.hasText(schedule) == false) { + err.addValidationError("invalid schedule [" + schedule + "]: must not be empty"); + } else { + try { + new Cron(schedule); + } catch (IllegalArgumentException e) { + err.addValidationError("invalid schedule: " + + ExceptionsHelper.unwrapCause(e).getMessage()); + } + } + + if (configuration != null && configuration.containsKey(METADATA_FIELD_NAME)) { + if (configuration.get(METADATA_FIELD_NAME) instanceof Map == false) { + err.addValidationError("invalid configuration." + METADATA_FIELD_NAME + " [" + configuration.get(METADATA_FIELD_NAME) + + "]: must be an object if present"); + } else { + @SuppressWarnings("unchecked") + Map metadata = (Map) configuration.get(METADATA_FIELD_NAME); + if (metadata.containsKey(POLICY_ID_METADATA_FIELD)) { + err.addValidationError("invalid configuration." + METADATA_FIELD_NAME + ": field name [" + POLICY_ID_METADATA_FIELD + + "] is reserved and will be added automatically"); + } else { + Map metadataWithPolicyField = addPolicyNameToMetadata(metadata); + int serializedSizeOriginal = CreateSnapshotRequest.metadataSize(metadata); + int serializedSizeWithMetadata = CreateSnapshotRequest.metadataSize(metadataWithPolicyField); + int policyNameAddedBytes = serializedSizeWithMetadata - serializedSizeOriginal; + if (serializedSizeWithMetadata > CreateSnapshotRequest.MAXIMUM_METADATA_BYTES) { + err.addValidationError("invalid configuration." + METADATA_FIELD_NAME + ": must be smaller than [" + + (CreateSnapshotRequest.MAXIMUM_METADATA_BYTES - policyNameAddedBytes) + + "] bytes, but is [" + serializedSizeOriginal + "] bytes"); + } + } + } + } + + // Repository validation, validation of whether the repository actually exists happens + // elsewhere as it requires cluster state + if (Strings.hasText(repository) == false) { + err.addValidationError("invalid repository name [" + repository + "]: cannot be empty"); + } + + return err.validationErrors().size() == 0 ? null : err; + } + + private Map addPolicyNameToMetadata(final Map metadata) { + Map newMetadata; + if (metadata == null) { + newMetadata = new HashMap<>(); + } else { + newMetadata = new HashMap<>(metadata); + } + newMetadata.put(POLICY_ID_METADATA_FIELD, this.id); + return newMetadata; + } + + /** + * Since snapshots need to be uniquely named, this method will resolve any date math used in + * the provided name, as well as appending a unique identifier so expressions that may overlap + * still result in unique snapshot names. 
+ */ + public String generateSnapshotName(Context context) { + List candidates = DATE_MATH_RESOLVER.resolve(context, Collections.singletonList(this.name)); + if (candidates.size() != 1) { + throw new IllegalStateException("resolving snapshot name " + this.name + " generated more than one candidate: " + candidates); + } + // TODO: we are breaking the rules of UUIDs by lowercasing this here, find an alternative (snapshot names must be lowercase) + return candidates.get(0) + "-" + UUIDs.randomBase64UUID().toLowerCase(Locale.ROOT); + } + + /** + * Generate a new create snapshot request from this policy. The name of the snapshot is + * generated at this time based on any date math expressions in the "name" field. + */ + public CreateSnapshotRequest toRequest() { + CreateSnapshotRequest req = new CreateSnapshotRequest(repository, generateSnapshotName(new ResolverContext())); + @SuppressWarnings("unchecked") + Map metadata = (Map) configuration.get("metadata"); + Map metadataWithAddedPolicyName = addPolicyNameToMetadata(metadata); + Map mergedConfiguration = new HashMap<>(configuration); + mergedConfiguration.put("metadata", metadataWithAddedPolicyName); + req.source(mergedConfiguration); + req.waitForCompletion(false); + return req; + } + + public static SnapshotLifecyclePolicy parse(XContentParser parser, String id) { + return PARSER.apply(parser, id); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(this.id); + out.writeString(this.name); + out.writeString(this.schedule); + out.writeString(this.repository); + out.writeMap(this.configuration); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(NAME.getPreferredName(), this.name); + builder.field(SCHEDULE.getPreferredName(), this.schedule); + builder.field(REPOSITORY.getPreferredName(), this.repository); + if (this.configuration != null) { + builder.field(CONFIG.getPreferredName(), this.configuration); + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(id, name, schedule, repository, configuration); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + + if (obj.getClass() != getClass()) { + return false; + } + SnapshotLifecyclePolicy other = (SnapshotLifecyclePolicy) obj; + return Objects.equals(id, other.id) && + Objects.equals(name, other.name) && + Objects.equals(schedule, other.schedule) && + Objects.equals(repository, other.repository) && + Objects.equals(configuration, other.configuration); + } + + @Override + public String toString() { + return Strings.toString(this); + } + + /** + * This is a context for the DateMathExpressionResolver, which does not require + * {@code IndicesOptions} or {@code ClusterState} since it only uses the start + * time to resolve expressions + */ + public static final class ResolverContext extends Context { + public ResolverContext() { + this(System.currentTimeMillis()); + } + + public ResolverContext(long startTime) { + super(null, null, startTime, false, false); + } + + @Override + public ClusterState getState() { + throw new UnsupportedOperationException("should never be called"); + } + + @Override + public IndicesOptions getOptions() { + throw new UnsupportedOperationException("should never be called"); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/SnapshotLifecyclePolicyItem.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/SnapshotLifecyclePolicyItem.java new file mode 100644 index 0000000000000..4e860e331d3d8 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/SnapshotLifecyclePolicyItem.java @@ -0,0 +1,135 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.snapshotlifecycle; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * The {@code SnapshotLifecyclePolicyItem} class is a special wrapper almost exactly like the + * {@link SnapshotLifecyclePolicyMetadata}, however, it elides the headers to ensure that they + * are not leaked to the user since they may contain sensitive information. + */ +public class SnapshotLifecyclePolicyItem implements ToXContentFragment, Writeable { + + private final SnapshotLifecyclePolicy policy; + private final long version; + private final long modifiedDate; + + @Nullable + private final SnapshotInvocationRecord lastSuccess; + + @Nullable + private final SnapshotInvocationRecord lastFailure; + public SnapshotLifecyclePolicyItem(SnapshotLifecyclePolicyMetadata policyMetadata) { + this.policy = policyMetadata.getPolicy(); + this.version = policyMetadata.getVersion(); + this.modifiedDate = policyMetadata.getModifiedDate(); + this.lastSuccess = policyMetadata.getLastSuccess(); + this.lastFailure = policyMetadata.getLastFailure(); + } + + public SnapshotLifecyclePolicyItem(StreamInput in) throws IOException { + this.policy = new SnapshotLifecyclePolicy(in); + this.version = in.readVLong(); + this.modifiedDate = in.readVLong(); + this.lastSuccess = in.readOptionalWriteable(SnapshotInvocationRecord::new); + this.lastFailure = in.readOptionalWriteable(SnapshotInvocationRecord::new); + } + + // For testing + + SnapshotLifecyclePolicyItem(SnapshotLifecyclePolicy policy, long version, long modifiedDate, + SnapshotInvocationRecord lastSuccess, SnapshotInvocationRecord lastFailure) { + this.policy = policy; + this.version = version; + this.modifiedDate = modifiedDate; + this.lastSuccess = lastSuccess; + this.lastFailure = lastFailure; + } + public SnapshotLifecyclePolicy getPolicy() { + return policy; + } + + public long getVersion() { + return version; + } + + public long getModifiedDate() { + return modifiedDate; + } + + public SnapshotInvocationRecord getLastSuccess() { + return lastSuccess; + } + + public SnapshotInvocationRecord getLastFailure() { + return lastFailure; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + policy.writeTo(out); + out.writeVLong(version); + out.writeVLong(modifiedDate); + out.writeOptionalWriteable(lastSuccess); + out.writeOptionalWriteable(lastFailure); + } + + @Override + public int hashCode() { + return Objects.hash(policy, version, modifiedDate, lastSuccess, lastFailure); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if 
(obj.getClass() != getClass()) { + return false; + } + SnapshotLifecyclePolicyItem other = (SnapshotLifecyclePolicyItem) obj; + return policy.equals(other.policy) && + version == other.version && + modifiedDate == other.modifiedDate && + Objects.equals(lastSuccess, other.lastSuccess) && + Objects.equals(lastFailure, other.lastFailure); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(policy.getId()); + builder.field(SnapshotLifecyclePolicyMetadata.VERSION.getPreferredName(), version); + builder.timeField(SnapshotLifecyclePolicyMetadata.MODIFIED_DATE_MILLIS.getPreferredName(), + SnapshotLifecyclePolicyMetadata.MODIFIED_DATE.getPreferredName(), modifiedDate); + builder.field(SnapshotLifecyclePolicyMetadata.POLICY.getPreferredName(), policy); + if (lastSuccess != null) { + builder.field(SnapshotLifecyclePolicyMetadata.LAST_SUCCESS.getPreferredName(), lastSuccess); + } + if (lastFailure != null) { + builder.field(SnapshotLifecyclePolicyMetadata.LAST_FAILURE.getPreferredName(), lastFailure); + } + builder.timeField(SnapshotLifecyclePolicyMetadata.NEXT_EXECUTION_MILLIS.getPreferredName(), + SnapshotLifecyclePolicyMetadata.NEXT_EXECUTION.getPreferredName(), policy.calculateNextExecution()); + builder.endObject(); + return builder; + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/SnapshotLifecyclePolicyMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/SnapshotLifecyclePolicyMetadata.java new file mode 100644 index 0000000000000..6abd43df35576 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/SnapshotLifecyclePolicyMetadata.java @@ -0,0 +1,260 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.snapshotlifecycle; + +import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.cluster.Diffable; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; + +/** + * {@code SnapshotLifecyclePolicyMetadata} encapsulates a {@link SnapshotLifecyclePolicy} as well as + * the additional meta information like headers used for execution, version (a monotonically + * incrementing number), and last modified date + */ +public class SnapshotLifecyclePolicyMetadata extends AbstractDiffable<SnapshotLifecyclePolicyMetadata> + implements ToXContentObject, Diffable<SnapshotLifecyclePolicyMetadata> { + + static final ParseField POLICY = new ParseField("policy"); + static final ParseField HEADERS = new ParseField("headers"); + static final ParseField VERSION = new ParseField("version"); + static final ParseField MODIFIED_DATE_MILLIS = new ParseField("modified_date_millis"); + static final ParseField MODIFIED_DATE = new ParseField("modified_date"); + static final ParseField LAST_SUCCESS = new ParseField("last_success"); + static final ParseField LAST_FAILURE = new ParseField("last_failure"); + static final ParseField NEXT_EXECUTION_MILLIS = new ParseField("next_execution_millis"); + static final ParseField NEXT_EXECUTION = new ParseField("next_execution"); + + private final SnapshotLifecyclePolicy policy; + private final Map<String, String> headers; + private final long version; + private final long modifiedDate; + @Nullable + private final SnapshotInvocationRecord lastSuccess; + @Nullable + private final SnapshotInvocationRecord lastFailure; + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser<SnapshotLifecyclePolicyMetadata, String> PARSER = + new ConstructingObjectParser<>("snapshot_policy_metadata", + a -> { + SnapshotLifecyclePolicy policy = (SnapshotLifecyclePolicy) a[0]; + SnapshotInvocationRecord lastSuccess = (SnapshotInvocationRecord) a[4]; + SnapshotInvocationRecord lastFailure = (SnapshotInvocationRecord) a[5]; + + return builder() + .setPolicy(policy) + .setHeaders((Map<String, String>) a[1]) + .setVersion((long) a[2]) + .setModifiedDate((long) a[3]) + .setLastSuccess(lastSuccess) + .setLastFailure(lastFailure) + .build(); + }); + + static { + PARSER.declareObject(ConstructingObjectParser.constructorArg(), SnapshotLifecyclePolicy::parse, POLICY); + PARSER.declareField(ConstructingObjectParser.constructorArg(), XContentParser::mapStrings, HEADERS, ObjectParser.ValueType.OBJECT); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), VERSION); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), MODIFIED_DATE_MILLIS); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), SnapshotInvocationRecord::parse, LAST_SUCCESS); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), SnapshotInvocationRecord::parse, LAST_FAILURE); + } + + public static SnapshotLifecyclePolicyMetadata parse(XContentParser parser, String name) { + return PARSER.apply(parser, name); + } + + SnapshotLifecyclePolicyMetadata(SnapshotLifecyclePolicy
policy, Map headers, long version, long modifiedDate, + SnapshotInvocationRecord lastSuccess, SnapshotInvocationRecord lastFailure) { + this.policy = policy; + this.headers = headers; + this.version = version; + this.modifiedDate = modifiedDate; + this.lastSuccess = lastSuccess; + this.lastFailure = lastFailure; + } + + @SuppressWarnings("unchecked") + SnapshotLifecyclePolicyMetadata(StreamInput in) throws IOException { + this.policy = new SnapshotLifecyclePolicy(in); + this.headers = (Map) in.readGenericValue(); + this.version = in.readVLong(); + this.modifiedDate = in.readVLong(); + this.lastSuccess = in.readOptionalWriteable(SnapshotInvocationRecord::new); + this.lastFailure = in.readOptionalWriteable(SnapshotInvocationRecord::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + this.policy.writeTo(out); + out.writeGenericValue(this.headers); + out.writeVLong(this.version); + out.writeVLong(this.modifiedDate); + out.writeOptionalWriteable(this.lastSuccess); + out.writeOptionalWriteable(this.lastFailure); + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(SnapshotLifecyclePolicyMetadata metadata) { + if (metadata == null) { + return builder(); + } + return new Builder() + .setHeaders(metadata.getHeaders()) + .setPolicy(metadata.getPolicy()) + .setVersion(metadata.getVersion()) + .setModifiedDate(metadata.getModifiedDate()) + .setLastSuccess(metadata.getLastSuccess()) + .setLastFailure(metadata.getLastFailure()); + } + + public Map getHeaders() { + return headers; + } + + public SnapshotLifecyclePolicy getPolicy() { + return policy; + } + + public String getName() { + return policy.getName(); + } + + public long getVersion() { + return version; + } + + public long getModifiedDate() { + return modifiedDate; + } + + public SnapshotInvocationRecord getLastSuccess() { + return lastSuccess; + } + + public SnapshotInvocationRecord getLastFailure() { + return lastFailure; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(POLICY.getPreferredName(), policy); + builder.field(HEADERS.getPreferredName(), headers); + builder.field(VERSION.getPreferredName(), version); + builder.timeField(MODIFIED_DATE_MILLIS.getPreferredName(), MODIFIED_DATE.getPreferredName(), modifiedDate); + if (Objects.nonNull(lastSuccess)) { + builder.field(LAST_SUCCESS.getPreferredName(), lastSuccess); + } + if (Objects.nonNull(lastFailure)) { + builder.field(LAST_FAILURE.getPreferredName(), lastFailure); + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(policy, headers, version, modifiedDate, lastSuccess, lastFailure); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + SnapshotLifecyclePolicyMetadata other = (SnapshotLifecyclePolicyMetadata) obj; + return Objects.equals(policy, other.policy) && + Objects.equals(headers, other.headers) && + Objects.equals(version, other.version) && + Objects.equals(modifiedDate, other.modifiedDate) && + Objects.equals(lastSuccess, other.lastSuccess) && + Objects.equals(lastFailure, other.lastFailure); + } + + @Override + public String toString() { + // Note: this is on purpose. 
While usually we would use Strings.toString(this) to render + // this using toXContent, it may contain sensitive information in the headers and thus + // should not emit them in case it accidentally gets logged. + return super.toString(); + } + + public static class Builder { + + private Builder() { + } + + private SnapshotLifecyclePolicy policy; + private Map headers; + private long version = 1L; + private Long modifiedDate; + private SnapshotInvocationRecord lastSuccessDate; + private SnapshotInvocationRecord lastFailureDate; + + public Builder setPolicy(SnapshotLifecyclePolicy policy) { + this.policy = policy; + return this; + } + + public Builder setHeaders(Map headers) { + this.headers = headers; + return this; + } + + public Builder setVersion(long version) { + this.version = version; + return this; + } + + public Builder setModifiedDate(long modifiedDate) { + this.modifiedDate = modifiedDate; + return this; + } + + public Builder setLastSuccess(SnapshotInvocationRecord lastSuccessDate) { + this.lastSuccessDate = lastSuccessDate; + return this; + } + + public Builder setLastFailure(SnapshotInvocationRecord lastFailureDate) { + this.lastFailureDate = lastFailureDate; + return this; + } + + public SnapshotLifecyclePolicyMetadata build() { + return new SnapshotLifecyclePolicyMetadata( + Objects.requireNonNull(policy), + Optional.ofNullable(headers).orElse(new HashMap<>()), + version, + Objects.requireNonNull(modifiedDate, "modifiedDate must be set"), + lastSuccessDate, + lastFailureDate); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/action/DeleteSnapshotLifecycleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/action/DeleteSnapshotLifecycleAction.java new file mode 100644 index 0000000000000..8d325f675e4b2 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/action/DeleteSnapshotLifecycleAction.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.snapshotlifecycle.action; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; + +import java.io.IOException; +import java.util.Objects; + +public class DeleteSnapshotLifecycleAction extends ActionType { + public static final DeleteSnapshotLifecycleAction INSTANCE = new DeleteSnapshotLifecycleAction(); + public static final String NAME = "cluster:admin/slm/delete"; + + protected DeleteSnapshotLifecycleAction() { + super(NAME, DeleteSnapshotLifecycleAction.Response::new); + } + + public static class Request extends AcknowledgedRequest { + + private String lifecycleId; + + public Request(StreamInput in) throws IOException { + super(in); + lifecycleId = in.readString(); + } + + public Request() { } + + public Request(String lifecycleId) { + this.lifecycleId = Objects.requireNonNull(lifecycleId, "id may not be null"); + } + + public String getLifecycleId() { + return this.lifecycleId; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(lifecycleId); + } + + @Override + public int hashCode() { + return lifecycleId.hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(lifecycleId, other.lifecycleId); + } + } + + public static class Response extends AcknowledgedResponse implements ToXContentObject { + + public Response(boolean acknowledged) { + super(acknowledged); + } + + public Response(StreamInput streamInput) throws IOException { + this(streamInput.readBoolean()); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/action/ExecuteSnapshotLifecycleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/action/ExecuteSnapshotLifecycleAction.java new file mode 100644 index 0000000000000..61a907ab78053 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/action/ExecuteSnapshotLifecycleAction.java @@ -0,0 +1,123 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.snapshotlifecycle.action; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * Action used to manually invoke a create snapshot request for a given + * snapshot lifecycle policy regardless of schedule. + */ +public class ExecuteSnapshotLifecycleAction extends ActionType { + public static final ExecuteSnapshotLifecycleAction INSTANCE = new ExecuteSnapshotLifecycleAction(); + public static final String NAME = "cluster:admin/slm/execute"; + + protected ExecuteSnapshotLifecycleAction() { + super(NAME, ExecuteSnapshotLifecycleAction.Response::new); + } + + public static class Request extends AcknowledgedRequest implements ToXContentObject { + + private String lifecycleId; + + public Request(String lifecycleId) { + this.lifecycleId = lifecycleId; + } + + public Request(StreamInput in) throws IOException { + super(in); + lifecycleId = in.readString(); + } + + public Request() { } + + public String getLifecycleId() { + return this.lifecycleId; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(lifecycleId); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(lifecycleId); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + Request other = (Request) obj; + return lifecycleId.equals(other.lifecycleId); + } + + @Override + public String toString() { + return Strings.toString(this); + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private final String snapshotName; + + public Response(String snapshotName) { + this.snapshotName = snapshotName; + } + + public String getSnapshotName() { + return this.snapshotName; + } + + public Response(StreamInput in) throws IOException { + this(in.readString()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(this.snapshotName); + } + + @Override + public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("snapshot_name", getSnapshotName()); + builder.endObject(); + return builder; + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/action/GetSnapshotLifecycleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/action/GetSnapshotLifecycleAction.java new file mode 100644 index 0000000000000..d9aa0a30a843c --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/action/GetSnapshotLifecycleAction.java @@ -0,0 +1,135 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.snapshotlifecycle.action; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecyclePolicyItem; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +public class GetSnapshotLifecycleAction extends ActionType { + public static final GetSnapshotLifecycleAction INSTANCE = new GetSnapshotLifecycleAction(); + public static final String NAME = "cluster:admin/slm/get"; + + protected GetSnapshotLifecycleAction() { + super(NAME, GetSnapshotLifecycleAction.Response::new); + } + + public static class Request extends AcknowledgedRequest { + + private String[] lifecycleIds; + + public Request(String... lifecycleIds) { + this.lifecycleIds = Objects.requireNonNull(lifecycleIds, "ids may not be null"); + } + + public Request(StreamInput in) throws IOException { + super(in); + lifecycleIds = in.readStringArray(); + } + + public Request() { + this.lifecycleIds = Strings.EMPTY_ARRAY; + } + + public String[] getLifecycleIds() { + return this.lifecycleIds; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(lifecycleIds); + } + + @Override + public int hashCode() { + return Arrays.hashCode(lifecycleIds); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + Request other = (Request) obj; + return Arrays.equals(lifecycleIds, other.lifecycleIds); + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private List lifecycles; + + public Response() { } + + public Response(List lifecycles) { + this.lifecycles = lifecycles; + } + + public Response(StreamInput in) throws IOException { + this.lifecycles = in.readList(SnapshotLifecyclePolicyItem::new); + } + + @Override + public String toString() { + return Strings.toString(this); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + for (SnapshotLifecyclePolicyItem item : lifecycles) { + item.toXContent(builder, params); + } + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeList(lifecycles); + } + + @Override + public int hashCode() { + return Objects.hash(lifecycles); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + Response other = (Response) obj; + return lifecycles.equals(other.lifecycles); + } + } + +} diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/action/PutSnapshotLifecycleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/action/PutSnapshotLifecycleAction.java new file mode 100644 index 0000000000000..691f52c8229ac --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/action/PutSnapshotLifecycleAction.java @@ -0,0 +1,116 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.snapshotlifecycle.action; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecyclePolicy; + +import java.io.IOException; +import java.util.Objects; + +public class PutSnapshotLifecycleAction extends ActionType { + public static final PutSnapshotLifecycleAction INSTANCE = new PutSnapshotLifecycleAction(); + public static final String NAME = "cluster:admin/slm/put"; + + protected PutSnapshotLifecycleAction() { + super(NAME, PutSnapshotLifecycleAction.Response::new); + } + + public static class Request extends AcknowledgedRequest implements ToXContentObject { + + private String lifecycleId; + private SnapshotLifecyclePolicy lifecycle; + + public Request(String lifecycleId, SnapshotLifecyclePolicy lifecycle) { + this.lifecycleId = lifecycleId; + this.lifecycle = lifecycle; + } + + public Request(StreamInput in) throws IOException { + super(in); + lifecycleId = in.readString(); + lifecycle = new SnapshotLifecyclePolicy(in); + } + + public Request() { } + + public String getLifecycleId() { + return this.lifecycleId; + } + + public SnapshotLifecyclePolicy getLifecycle() { + return this.lifecycle; + } + + public static Request parseRequest(String lifecycleId, XContentParser parser) { + return new Request(lifecycleId, SnapshotLifecyclePolicy.parse(parser, lifecycleId)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(lifecycleId); + lifecycle.writeTo(out); + } + + @Override + public ActionRequestValidationException validate() { + return lifecycle.validate(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(lifecycleId, lifecycle); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(lifecycleId, lifecycle); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + Request other = (Request) obj; + return lifecycleId.equals(other.lifecycleId) && + lifecycle.equals(other.lifecycle); + } + + @Override + public String toString() { + return 
Strings.toString(this); + } + } + + public static class Response extends AcknowledgedResponse implements ToXContentObject { + + public Response(boolean acknowledged) { + super(acknowledged); + } + + public Response(StreamInput streamInput) throws IOException { + this(streamInput.readBoolean()); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/action/package-info.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/action/package-info.java new file mode 100644 index 0000000000000..dbf13fe892421 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/action/package-info.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/** + * Contains the action definitions for SLM. For the transport and rest action implementations, please see the {@code ilm} module's + * {@code org.elasticsearch.xpack.slm} package. + */ +package org.elasticsearch.xpack.core.snapshotlifecycle.action; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/history/SnapshotHistoryItem.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/history/SnapshotHistoryItem.java new file mode 100644 index 0000000000000..8120be9683fb7 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/history/SnapshotHistoryItem.java @@ -0,0 +1,223 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.snapshotlifecycle.history; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecyclePolicy; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE; + +/** + * Represents the record of a Snapshot Lifecycle Management action, so that it + * can be indexed in a history index or recorded to a log in a structured way + */ +public class SnapshotHistoryItem implements Writeable, ToXContentObject { + static final ParseField TIMESTAMP = new ParseField("@timestamp"); + static final ParseField POLICY_ID = new ParseField("policy"); + static final ParseField REPOSITORY = new ParseField("repository"); + static final ParseField SNAPSHOT_NAME = new ParseField("snapshot_name"); + static final ParseField OPERATION = new ParseField("operation"); + static final ParseField SUCCESS = new ParseField("success"); + private static final String CREATE_OPERATION = "CREATE"; + protected final long timestamp; + protected final String policyId; + protected final String repository; + protected final String snapshotName; + protected final String operation; + protected final boolean success; + + private final Map snapshotConfiguration; + @Nullable + private final String errorDetails; + + static final ParseField SNAPSHOT_CONFIG = new ParseField("configuration"); + static final ParseField ERROR_DETAILS = new ParseField("error_details"); + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("snapshot_lifecycle_history_item", true, + (a, id) -> { + final long timestamp = (long) a[0]; + final String policyId = (String) a[1]; + final String repository = (String) a[2]; + final String snapshotName = (String) a[3]; + final String operation = (String) a[4]; + final boolean success = (boolean) a[5]; + final Map snapshotConfiguration = (Map) a[6]; + final String errorDetails = (String) a[7]; + return new SnapshotHistoryItem(timestamp, policyId, repository, snapshotName, operation, success, + snapshotConfiguration, errorDetails); + }); + + static { + PARSER.declareLong(ConstructingObjectParser.constructorArg(), TIMESTAMP); + PARSER.declareString(ConstructingObjectParser.constructorArg(), POLICY_ID); + PARSER.declareString(ConstructingObjectParser.constructorArg(), REPOSITORY); + PARSER.declareString(ConstructingObjectParser.constructorArg(), SNAPSHOT_NAME); + PARSER.declareString(ConstructingObjectParser.constructorArg(), OPERATION); + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), SUCCESS); + PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> p.map(), SNAPSHOT_CONFIG); + 
PARSER.declareStringOrNull(ConstructingObjectParser.constructorArg(), ERROR_DETAILS); + } + + public static SnapshotHistoryItem parse(XContentParser parser, String name) { + return PARSER.apply(parser, name); + } + + SnapshotHistoryItem(long timestamp, String policyId, String repository, String snapshotName, String operation, + boolean success, Map snapshotConfiguration, String errorDetails) { + this.timestamp = timestamp; + this.policyId = Objects.requireNonNull(policyId); + this.repository = Objects.requireNonNull(repository); + this.snapshotName = Objects.requireNonNull(snapshotName); + this.operation = Objects.requireNonNull(operation); + this.success = success; + this.snapshotConfiguration = snapshotConfiguration; + this.errorDetails = errorDetails; + } + + public static SnapshotHistoryItem successRecord(long timestamp, SnapshotLifecyclePolicy policy, String snapshotName) { + return new SnapshotHistoryItem(timestamp, policy.getId(), policy.getRepository(), snapshotName, CREATE_OPERATION, true, + policy.getConfig(), null); + } + + public static SnapshotHistoryItem failureRecord(long timeStamp, SnapshotLifecyclePolicy policy, String snapshotName, + Exception exception) throws IOException { + ToXContent.Params stacktraceParams = new ToXContent.MapParams(Collections.singletonMap(REST_EXCEPTION_SKIP_STACK_TRACE, "false")); + String exceptionString; + try (XContentBuilder causeXContentBuilder = JsonXContent.contentBuilder()) { + causeXContentBuilder.startObject(); + ElasticsearchException.generateThrowableXContent(causeXContentBuilder, stacktraceParams, exception); + causeXContentBuilder.endObject(); + exceptionString = BytesReference.bytes(causeXContentBuilder).utf8ToString(); + } + return new SnapshotHistoryItem(timeStamp, policy.getId(), policy.getRepository(), snapshotName, CREATE_OPERATION, false, + policy.getConfig(), exceptionString); + } + + public SnapshotHistoryItem(StreamInput in) throws IOException { + this.timestamp = in.readVLong(); + this.policyId = in.readString(); + this.repository = in.readString(); + this.snapshotName = in.readString(); + this.operation = in.readString(); + this.success = in.readBoolean(); + this.snapshotConfiguration = in.readMap(); + this.errorDetails = in.readOptionalString(); + } + + public Map getSnapshotConfiguration() { + return snapshotConfiguration; + } + + public String getErrorDetails() { + return errorDetails; + } + + public long getTimestamp() { + return timestamp; + } + + public String getPolicyId() { + return policyId; + } + + public String getRepository() { + return repository; + } + + public String getSnapshotName() { + return snapshotName; + } + + public String getOperation() { + return operation; + } + + public boolean isSuccess() { + return success; + } + + @Override + public final void writeTo(StreamOutput out) throws IOException { + out.writeVLong(timestamp); + out.writeString(policyId); + out.writeString(repository); + out.writeString(snapshotName); + out.writeString(operation); + out.writeBoolean(success); + out.writeMap(snapshotConfiguration); + out.writeOptionalString(errorDetails); + } + + @Override + public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.timeField(TIMESTAMP.getPreferredName(), "timestamp_string", timestamp); + builder.field(POLICY_ID.getPreferredName(), policyId); + builder.field(REPOSITORY.getPreferredName(), repository); + builder.field(SNAPSHOT_NAME.getPreferredName(), snapshotName); + 
builder.field(OPERATION.getPreferredName(), operation); + builder.field(SUCCESS.getPreferredName(), success); + builder.field(SNAPSHOT_CONFIG.getPreferredName(), snapshotConfiguration); + builder.field(ERROR_DETAILS.getPreferredName(), errorDetails); + } + builder.endObject(); + + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SnapshotHistoryItem that = (SnapshotHistoryItem) o; + return isSuccess() == that.isSuccess() && + timestamp == that.getTimestamp() && + Objects.equals(getPolicyId(), that.getPolicyId()) && + Objects.equals(getRepository(), that.getRepository()) && + Objects.equals(getSnapshotName(), that.getSnapshotName()) && + Objects.equals(getOperation(), that.getOperation()) && + Objects.equals(getSnapshotConfiguration(), that.getSnapshotConfiguration()) && + Objects.equals(getErrorDetails(), that.getErrorDetails()); + } + + @Override + public int hashCode() { + return Objects.hash(getTimestamp(), getPolicyId(), getRepository(), getSnapshotName(), getOperation(), isSuccess(), + getSnapshotConfiguration(), getErrorDetails()); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/history/SnapshotHistoryStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/history/SnapshotHistoryStore.java new file mode 100644 index 0000000000000..83ceb17ba3a80 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/history/SnapshotHistoryStore.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.snapshotlifecycle.history; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; + +import java.io.IOException; +import java.time.Instant; +import java.time.ZoneId; +import java.time.ZonedDateTime; + +import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings.SLM_HISTORY_INDEX_ENABLED_SETTING; +import static org.elasticsearch.xpack.core.snapshotlifecycle.history.SnapshotLifecycleTemplateRegistry.INDEX_TEMPLATE_VERSION; + +/** + * Records Snapshot Lifecycle Management actions as represented by {@link SnapshotHistoryItem} into an index + * for the purposes of querying and alerting.
+ */ +public class SnapshotHistoryStore { + private static final Logger logger = LogManager.getLogger(SnapshotHistoryStore.class); + private static final DateFormatter indexTimeFormat = DateFormatter.forPattern("yyyy.MM"); + + public static final String SLM_HISTORY_INDEX_PREFIX = ".slm-history-" + INDEX_TEMPLATE_VERSION + "-"; + + private final Client client; + private final ZoneId timeZone; + private final boolean slmHistoryEnabled; + + public SnapshotHistoryStore(Settings nodeSettings, Client client, ZoneId timeZone) { + this.client = client; + this.timeZone = timeZone; + slmHistoryEnabled = SLM_HISTORY_INDEX_ENABLED_SETTING.get(nodeSettings); + } + + /** + * Attempts to asynchronously index a snapshot lifecycle management history entry + * + * @param item The entry to index + */ + public void putAsync(SnapshotHistoryItem item) { + if (slmHistoryEnabled == false) { + logger.trace("not recording snapshot history item because [{}] is [false]: [{}]", + SLM_HISTORY_INDEX_ENABLED_SETTING.getKey(), item); + return; + } + final ZonedDateTime dateTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(item.getTimestamp()), timeZone); + final String index = getHistoryIndexNameForTime(dateTime); + logger.trace("about to index snapshot history item in index [{}]: [{}]", index, item); + try (XContentBuilder builder = XContentFactory.jsonBuilder()) { + item.toXContent(builder, ToXContent.EMPTY_PARAMS); + IndexRequest request = new IndexRequest(index) + .source(builder); + client.index(request, ActionListener.wrap(indexResponse -> { + logger.debug("successfully indexed snapshot history item with id [{}] in index [{}]: [{}]", + indexResponse.getId(), index, item); + }, exception -> { + logger.error(new ParameterizedMessage("failed to index snapshot history item in index [{}]: [{}]", + index, item), exception); + })); + } catch (IOException exception) { + logger.error(new ParameterizedMessage("failed to index snapshot history item in index [{}]: [{}]", + index, item), exception); + } + } + + + static String getHistoryIndexNameForTime(ZonedDateTime time) { + return SLM_HISTORY_INDEX_PREFIX + indexTimeFormat.format(time); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/history/SnapshotLifecycleTemplateRegistry.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/history/SnapshotLifecycleTemplateRegistry.java new file mode 100644 index 0000000000000..27e5e63013e56 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/history/SnapshotLifecycleTemplateRegistry.java @@ -0,0 +1,104 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
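To make the month-granularity naming in getHistoryIndexNameForTime above concrete, here is a minimal, self-contained sketch. The prefix and template version are copied from the constants in SnapshotHistoryStore; the class and method names are invented for the example:

    import java.time.ZoneOffset;
    import java.time.ZonedDateTime;
    import java.time.format.DateTimeFormatter;

    public class HistoryIndexNameDemo {
        // mirrors SnapshotHistoryStore: ".slm-history-" + INDEX_TEMPLATE_VERSION + "-" + yyyy.MM
        private static final DateTimeFormatter INDEX_TIME_FORMAT = DateTimeFormatter.ofPattern("yyyy.MM");

        static String historyIndexNameFor(ZonedDateTime time) {
            return ".slm-history-1-" + INDEX_TIME_FORMAT.format(time);
        }

        public static void main(String[] args) {
            ZonedDateTime time = ZonedDateTime.of(2019, 7, 15, 0, 0, 0, 0, ZoneOffset.UTC);
            System.out.println(historyIndexNameFor(time)); // prints: .slm-history-1-2019.07
        }
    }

All writes for a given month land in the same index, so the indices roll over naturally month by month and can be aged out by the ILM policy shipped at the end of this change.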
+ */ + +package org.elasticsearch.xpack.core.snapshotlifecycle.history; + +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.xpack.core.template.IndexTemplateConfig; +import org.elasticsearch.xpack.core.template.IndexTemplateRegistry; +import org.elasticsearch.xpack.core.template.LifecyclePolicyConfig; + +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.core.ClientHelper.INDEX_LIFECYCLE_ORIGIN; +import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings.SLM_HISTORY_INDEX_ENABLED_SETTING; + +/** + * Manages the index template and associated ILM policy for the Snapshot + * Lifecycle Management history index. + */ +public class SnapshotLifecycleTemplateRegistry extends IndexTemplateRegistry { + // history (please add a comment why you increased the version here) + // version 1: initial + public static final String INDEX_TEMPLATE_VERSION = "1"; + + public static final String SLM_TEMPLATE_VERSION_VARIABLE = "xpack.slm.template.version"; + public static final String SLM_TEMPLATE_NAME = ".slm-history"; + + public static final String SLM_POLICY_NAME = "slm-history-ilm-policy"; + + public static final IndexTemplateConfig TEMPLATE_SLM_HISTORY = new IndexTemplateConfig( + SLM_TEMPLATE_NAME, + "/slm-history.json", + INDEX_TEMPLATE_VERSION, + SLM_TEMPLATE_VERSION_VARIABLE + ); + + public static final LifecyclePolicyConfig SLM_HISTORY_POLICY = new LifecyclePolicyConfig( + SLM_POLICY_NAME, + "/slm-history-ilm-policy.json" + ); + + private final boolean slmHistoryEnabled; + + public SnapshotLifecycleTemplateRegistry(Settings nodeSettings, ClusterService clusterService, ThreadPool threadPool, Client client, + NamedXContentRegistry xContentRegistry) { + super(nodeSettings, clusterService, threadPool, client, xContentRegistry); + slmHistoryEnabled = SLM_HISTORY_INDEX_ENABLED_SETTING.get(nodeSettings); + } + + @Override + protected List getTemplateConfigs() { + if (slmHistoryEnabled == false) { + return Collections.emptyList(); + } + return Collections.singletonList(TEMPLATE_SLM_HISTORY); + } + + @Override + protected List getPolicyConfigs() { + if (slmHistoryEnabled == false) { + return Collections.emptyList(); + } + return Collections.singletonList(SLM_HISTORY_POLICY); + } + + @Override + protected String getOrigin() { + return INDEX_LIFECYCLE_ORIGIN; // TODO use separate SLM origin? 
+ } + + public boolean validate(ClusterState state) { + boolean allTemplatesPresent = getTemplateConfigs().stream() + .map(IndexTemplateConfig::getTemplateName) + .allMatch(name -> state.metaData().getTemplates().containsKey(name)); + + Optional<Map<String, LifecyclePolicy>> maybePolicies = Optional + .<IndexLifecycleMetadata>ofNullable(state.metaData().custom(IndexLifecycleMetadata.TYPE)) + .map(IndexLifecycleMetadata::getPolicies); + Set<String> policyNames = getPolicyConfigs().stream() + .map(LifecyclePolicyConfig::getPolicyName) + .collect(Collectors.toSet()); + + boolean allPoliciesPresent = maybePolicies + .map(policies -> policies.keySet() + .containsAll(policyNames)) + .orElse(false); + return allTemplatesPresent && allPoliciesPresent; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/history/package-info.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/history/package-info.java new file mode 100644 index 0000000000000..db1ab6930af94 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/history/package-info.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/** + * This package contains the utility classes used to persist SLM policy execution results to an internal index. + * + *
<p>
The {@link org.elasticsearch.xpack.core.snapshotlifecycle.history.SnapshotLifecycleTemplateRegistry} class is registered as a + * cluster state listener when the ILM plugin starts up. It executes only on the elected master node, and ensures that a template is + * configured for the SLM history index, as well as an ILM policy (since the two are always enabled in lock step). + * + *
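The master-only behavior described above follows the standard cluster-state-listener shape; a minimal sketch of that gating (the listener body and class name are illustrative, not the actual IndexTemplateRegistry code):

    import org.elasticsearch.cluster.ClusterChangedEvent;
    import org.elasticsearch.cluster.ClusterStateListener;

    final class MasterOnlyTemplateInstaller implements ClusterStateListener {
        @Override
        public void clusterChanged(ClusterChangedEvent event) {
            if (event.localNodeMaster() == false) {
                return; // only the elected master installs the template and ILM policy
            }
            // ... compare the cluster state against the expected template/policy and PUT whatever is missing ...
        }
    }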
<p>
The {@link org.elasticsearch.xpack.core.snapshotlifecycle.history.SnapshotHistoryItem} is used to encapsulate historical + * information about a snapshot policy execution. This contains more data than the + * {@link org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotInvocationRecord} since it is a more complete history record + * stored on disk instead of a low surface area status entry. + * + *
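In practice a SnapshotHistoryItem is produced through the successRecord/failureRecord factory methods shown earlier; a sketch of turning a policy execution result into a record (the helper name is invented, and failureRecord declares IOException because it renders the exception to JSON):

    import java.io.IOException;
    import java.io.UncheckedIOException;

    import org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecyclePolicy;
    import org.elasticsearch.xpack.core.snapshotlifecycle.history.SnapshotHistoryItem;

    class HistoryItemSketch {
        static SnapshotHistoryItem recordFor(SnapshotLifecyclePolicy policy, String snapshotName, Exception failure) {
            long now = System.currentTimeMillis();
            if (failure == null) {
                return SnapshotHistoryItem.successRecord(now, policy, snapshotName);
            }
            try {
                return SnapshotHistoryItem.failureRecord(now, policy, snapshotName, failure);
            } catch (IOException e) {
                throw new UncheckedIOException(e); // serializing the failure itself failed
            }
        }
    }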
<p>
The {@link org.elasticsearch.xpack.core.snapshotlifecycle.history.SnapshotHistoryStore} manages the persistence of the previously + * mentioned {@link org.elasticsearch.xpack.core.snapshotlifecycle.history.SnapshotHistoryItem}. It simply does an asynchronous put + * operation against the SLM history internal index. + */ +package org.elasticsearch.xpack.core.snapshotlifecycle.history; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/package-info.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/package-info.java new file mode 100644 index 0000000000000..e0ad866f734f0 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/snapshotlifecycle/package-info.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/** + * This is the Snapshot Lifecycle Management (SLM) core package. This package contains the core classes for SLM, including all of the + * custom cluster state metadata objects, execution history storage facilities, and the action definitions. For the main SLM service + * implementation classes, please see the {@code ilm} module's {@code org.elasticsearch.xpack.slm} package. + * + *
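Persisting one of these records through the SnapshotHistoryStore just described is a single fire-and-forget call; a usage sketch, assuming policy and historyStore instances are in scope (the plugin wires the store up elsewhere):

    SnapshotHistoryItem item = SnapshotHistoryItem.successRecord(
        System.currentTimeMillis(), policy, "snap-2019.07.15");
    // non-blocking: indexing failures are logged by the store, not surfaced to the caller
    historyStore.putAsync(item);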
<p>
Contained within this specific package are the custom metadata objects and models used throughout the SLM service. The names can + * be confusing, so it's important to know the differences between each metadata object. + * + *
<p>
The {@link org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecyclePolicy} object is the user provided definition of the + * SLM policy. This is what a user provides when creating a snapshot policy, and acts as the blueprint for the create snapshot request + * that the service launches. It additionally surfaces the next point in time a policy should be executed. + * + *
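The "next point in time a policy should be executed" comes from the policy's cron schedule; a minimal sketch of that computation using the x-pack scheduler's Cron class (the policy's own accessor is not shown in this file, so the wrapper below is illustrative):

    import org.elasticsearch.xpack.core.scheduler.Cron;

    class NextExecutionSketch {
        static long nextExecutionMillis(String cronSchedule) {
            Cron cron = new Cron(cronSchedule); // e.g. "0 30 1 * * ?" fires daily at 01:30
            return cron.getNextValidTimeAfter(System.currentTimeMillis()); // -1 if it never fires again
        }
    }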
<p>
Alongside the policy, the {@link org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotInvocationRecord} represents a single execution + * of a policy. It contains the policy name and the details of that execution, whether it succeeded or failed. + * + *
<p>
Next is the {@link org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecyclePolicyMetadata} object, which contains not only + * the {@link org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecyclePolicy} blueprint, but also contextual information about + * that policy, such as information about the user who created it (so that it may be used during execution), the version of the policy, + * and both the last failed and last successful runs as {@link org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotInvocationRecord}s. This + * is the living representation of a policy within the cluster state. + * + *
<p>
When a "Get Policy" action is executed, the {@link org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecyclePolicyItem} is + * returned instead. This is a thin wrapper around the internal + * {@link org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecyclePolicyMetadata} object so that we do not expose any sensitive + * internal headers or user information in the Get API. + * + *
<p>
Finally, the {@link org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecycleMetadata} class contains all living SLM + * policies and their metadata, acting as the SLM specific root object within the cluster state. + */ +package org.elasticsearch.xpack.core.snapshotlifecycle; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/spatial/SpatialFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/spatial/SpatialFeatureSetUsage.java new file mode 100644 index 0000000000000..9b1397c9642ee --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/spatial/SpatialFeatureSetUsage.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.spatial; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackField; + +import java.io.IOException; +import java.util.Objects; + +public class SpatialFeatureSetUsage extends XPackFeatureSet.Usage { + + public SpatialFeatureSetUsage(boolean available, boolean enabled) { + super(XPackField.SPATIAL, available, enabled); + } + + public SpatialFeatureSetUsage(StreamInput input) throws IOException { + super(input); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + } + + @Override + public int hashCode() { + return Objects.hash(available, enabled); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + SpatialFeatureSetUsage other = (SpatialFeatureSetUsage) obj; + return Objects.equals(available, other.available) && + Objects.equals(enabled, other.enabled); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/action/GetCertificateInfoAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/action/GetCertificateInfoAction.java index 739cb08428c2f..efdb8c4bd6025 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/action/GetCertificateInfoAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/action/GetCertificateInfoAction.java @@ -9,7 +9,7 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -25,22 +25,23 @@ * Action to obtain information about X.509 (SSL/TLS) certificates that are being used by X-Pack. * The primary use case is for tracking the expiry dates of certificates. 
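From here on, the bulk of this change is one mechanical refactor applied file by file: each StreamableResponseActionType subclass becomes a plain ActionType whose constructor receives a Writeable.Reader, and each response's readFrom override moves into a StreamInput constructor. A minimal sketch of the resulting shape, with invented class and action names:

    import java.io.IOException;

    import org.elasticsearch.action.ActionResponse;
    import org.elasticsearch.action.ActionType;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;

    public class ExampleAction extends ActionType<ExampleAction.Response> {
        public static final ExampleAction INSTANCE = new ExampleAction();
        public static final String NAME = "cluster:admin/example";

        private ExampleAction() {
            super(NAME, Response::new); // the Writeable.Reader replaces newResponse()
        }

        public static class Response extends ActionResponse {
            private final boolean ok;

            public Response(boolean ok) {
                this.ok = ok;
            }

            public Response(StreamInput in) throws IOException {
                super(in);
                this.ok = in.readBoolean(); // deserialization moves from readFrom() into this constructor
            }

            @Override
            public void writeTo(StreamOutput out) throws IOException {
                out.writeBoolean(ok);
            }
        }
    }

Requests that have not yet been migrated simply drop their readFrom bodies and throw UnsupportedOperationException, as several of the watcher requests below do.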
*/ -public class GetCertificateInfoAction extends StreamableResponseActionType { +public class GetCertificateInfoAction extends ActionType { public static final GetCertificateInfoAction INSTANCE = new GetCertificateInfoAction(); public static final String NAME = "cluster:monitor/xpack/ssl/certificates/get"; private GetCertificateInfoAction() { - super(NAME); - } - - @Override - public GetCertificateInfoAction.Response newResponse() { - return new GetCertificateInfoAction.Response(); + super(NAME, GetCertificateInfoAction.Response::new); } public static class Request extends ActionRequest { + Request() {} + + Request(StreamInput in) throws IOException { + super(in); + } + @Override public ActionRequestValidationException validate() { return null; @@ -52,7 +53,13 @@ public static class Response extends ActionResponse implements ToXContentObject private Collection certificates; - public Response() { + public Response(StreamInput in) throws IOException { + super(in); + this.certificates = new ArrayList<>(); + int count = in.readVInt(); + for (int i = 0; i < count; i++) { + certificates.add(new CertificateInfo(in)); + } } public Response(Collection certificates) { @@ -76,16 +83,7 @@ public void writeTo(StreamOutput out) throws IOException { } } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - this.certificates = new ArrayList<>(); - int count = in.readVInt(); - for (int i = 0; i < count; i++) { - certificates.add(new CertificateInfo(in)); - } } - } public static class RequestBuilder extends ActionRequestBuilder { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/action/TransportGetCertificateInfoAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/action/TransportGetCertificateInfoAction.java index 2f81d75e7d224..0c9f65540e08a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/action/TransportGetCertificateInfoAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/action/TransportGetCertificateInfoAction.java @@ -25,8 +25,7 @@ public class TransportGetCertificateInfoAction extends HandledTransportAction { +public class IndexUpgradeAction extends ActionType { public static final IndexUpgradeAction INSTANCE = new IndexUpgradeAction(); public static final String NAME = "cluster:admin/xpack/upgrade"; private IndexUpgradeAction() { - super(NAME); - } - - @Override - public BulkByScrollResponse newResponse() { - return new BulkByScrollResponse(); + super(NAME, BulkByScrollResponse::new); } public static class Request extends MasterNodeReadRequest implements IndicesRequest { @@ -113,11 +108,6 @@ public ActionRequestValidationException validate() { return validationException; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/actions/IndexUpgradeInfoAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/actions/IndexUpgradeInfoAction.java index 523e594725b9d..ff45e56ab3a95 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/actions/IndexUpgradeInfoAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/actions/IndexUpgradeInfoAction.java @@ -5,25 +5,20 @@ */ package 
org.elasticsearch.xpack.core.upgrade.actions; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoRequest; import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoResponse; -public class IndexUpgradeInfoAction extends StreamableResponseActionType { +public class IndexUpgradeInfoAction extends ActionType { public static final IndexUpgradeInfoAction INSTANCE = new IndexUpgradeInfoAction(); public static final String NAME = "cluster:admin/xpack/upgrade/info"; private IndexUpgradeInfoAction() { - super(NAME); - } - - @Override - public IndexUpgradeInfoResponse newResponse() { - return new IndexUpgradeInfoResponse(); + super(NAME, IndexUpgradeInfoResponse::new); } public static class RequestBuilder diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/common/stats/Counters.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/common/stats/Counters.java index bdf923a79d0e0..618b0c201bfb8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/common/stats/Counters.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/common/stats/Counters.java @@ -9,7 +9,7 @@ import com.carrotsearch.hppc.cursors.ObjectLongCursor; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import java.io.IOException; import java.util.HashMap; @@ -22,10 +22,17 @@ * Calling toNestedMap() will create a nested map, where each dot of the key name will nest deeper * The main reason for this class is that the stats producer should not be worried about how the map is actually nested */ -public class Counters implements Streamable { +public class Counters implements Writeable { private ObjectLongHashMap counters = new ObjectLongHashMap<>(); + public Counters(StreamInput in) throws IOException { + int counters = in.readVInt(); + for (int i = 0; i < counters; i++) { + inc(in.readString(), in.readVLong()); + } + } + public Counters(String ... 
names) { for (String name : names) { set(name); @@ -102,14 +109,6 @@ public Map toNestedMap() { return map; } - @Override - public void readFrom(StreamInput in) throws IOException { - int counters = in.readVInt(); - for (int i = 0; i < counters; i++) { - inc(in.readString(), in.readVLong()); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(counters.size()); @@ -119,12 +118,6 @@ public void writeTo(StreamOutput out) throws IOException { } } - public static Counters read(StreamInput in) throws IOException { - Counters counters = new Counters(); - counters.readFrom(in); - return counters; - } - public static Counters merge(List counters) { Counters result = new Counters(); for (Counters c : counters) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoService.java index 8bd999ebfd235..5787aff17cdf3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoService.java @@ -8,13 +8,13 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.CharArrays; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xpack.core.watcher.WatcherField; import org.elasticsearch.xpack.core.security.SecurityField; -import org.elasticsearch.common.CharArrays; +import org.elasticsearch.xpack.core.watcher.WatcherField; import javax.crypto.BadPaddingException; import javax.crypto.Cipher; @@ -22,7 +22,6 @@ import javax.crypto.SecretKey; import javax.crypto.spec.IvParameterSpec; import javax.crypto.spec.SecretKeySpec; - import java.io.IOException; import java.io.InputStream; import java.security.MessageDigest; @@ -79,11 +78,16 @@ public CryptoService(Settings settings) throws IOException { throw new IllegalArgumentException("invalid key length [" + keyLength + "]. value must be a multiple of 8"); } - SecretKey systemKey = readSystemKey(WatcherField.ENCRYPTION_KEY_SETTING.get(settings)); - try { - encryptionKey = encryptionKey(systemKey, keyLength, keyAlgorithm); - } catch (NoSuchAlgorithmException nsae) { - throw new ElasticsearchException("failed to start crypto service. could not load encryption key", nsae); + try (InputStream in = WatcherField.ENCRYPTION_KEY_SETTING.get(settings)) { + if (in == null) { + throw new ElasticsearchException("setting [" + WatcherField.ENCRYPTION_KEY_SETTING.getKey() + "] must be set in keystore"); + } + SecretKey systemKey = readSystemKey(in); + try { + encryptionKey = encryptionKey(systemKey, keyLength, keyAlgorithm); + } catch (NoSuchAlgorithmException nsae) { + throw new ElasticsearchException("failed to start crypto service. 
could not load encryption key", nsae); + } } assert encryptionKey != null : "the encryption key should never be null"; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/QueuedWatch.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/QueuedWatch.java index 69f1295bb20b2..bbf1946c6ece9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/QueuedWatch.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/QueuedWatch.java @@ -7,7 +7,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -16,16 +16,13 @@ import java.time.ZoneOffset; import java.time.ZonedDateTime; -public class QueuedWatch implements Streamable, ToXContentObject { +public class QueuedWatch implements Writeable, ToXContentObject { private String watchId; private String watchRecordId; private ZonedDateTime triggeredTime; private ZonedDateTime executionTime; - public QueuedWatch() { - } - public QueuedWatch(WatchExecutionContext ctx) { this.watchId = ctx.id().watchId(); this.watchRecordId = ctx.id().value(); @@ -33,6 +30,13 @@ public QueuedWatch(WatchExecutionContext ctx) { this.executionTime = ctx.executionTime(); } + public QueuedWatch(StreamInput in) throws IOException { + watchId = in.readString(); + watchRecordId = in.readString(); + triggeredTime = Instant.ofEpochMilli(in.readVLong()).atZone(ZoneOffset.UTC); + executionTime = Instant.ofEpochMilli(in.readVLong()).atZone(ZoneOffset.UTC); + } + public String watchId() { return watchId; } @@ -53,14 +57,6 @@ public void executionTime(ZonedDateTime executionTime) { this.executionTime = executionTime; } - @Override - public void readFrom(StreamInput in) throws IOException { - watchId = in.readString(); - watchRecordId = in.readString(); - triggeredTime = Instant.ofEpochMilli(in.readVLong()).atZone(ZoneOffset.UTC); - executionTime = Instant.ofEpochMilli(in.readVLong()).atZone(ZoneOffset.UTC); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(watchId); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/WatchExecutionSnapshot.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/WatchExecutionSnapshot.java index 055e08cd7a58c..c87f77133203d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/WatchExecutionSnapshot.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/WatchExecutionSnapshot.java @@ -7,7 +7,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.watcher.actions.ActionWrapperResult; @@ -18,18 +18,15 @@ import java.time.ZonedDateTime; import java.util.Map; -public class WatchExecutionSnapshot implements Streamable, ToXContentObject { +public class WatchExecutionSnapshot implements Writeable, ToXContentObject { - private String 
watchId; - private String watchRecordId; - private ZonedDateTime triggeredTime; - private ZonedDateTime executionTime; - private ExecutionPhase phase; + private final String watchId; + private final String watchRecordId; + private final ZonedDateTime triggeredTime; + private final ZonedDateTime executionTime; + private final ExecutionPhase phase; + private final StackTraceElement[] executionStackTrace; private String[] executedActions; - private StackTraceElement[] executionStackTrace; - - public WatchExecutionSnapshot() { - } public WatchExecutionSnapshot(WatchExecutionContext context, StackTraceElement[] executionStackTrace) { watchId = context.id().watchId(); @@ -48,6 +45,23 @@ public WatchExecutionSnapshot(WatchExecutionContext context, StackTraceElement[] this.executionStackTrace = executionStackTrace; } + public WatchExecutionSnapshot(StreamInput in) throws IOException { + watchId = in.readString(); + watchRecordId = in.readString(); + triggeredTime = Instant.ofEpochMilli(in.readVLong()).atZone(ZoneOffset.UTC); + executionTime = Instant.ofEpochMilli(in.readVLong()).atZone(ZoneOffset.UTC); + phase = ExecutionPhase.resolve(in.readString()); + int size = in.readVInt(); + executionStackTrace = new StackTraceElement[size]; + for (int i = 0; i < size; i++) { + String declaringClass = in.readString(); + String methodName = in.readString(); + String fileName = in.readOptionalString(); + int lineNumber = in.readInt(); + executionStackTrace[i] = new StackTraceElement(declaringClass, methodName, fileName, lineNumber); + } + } + public String watchId() { return watchId; } @@ -72,24 +86,6 @@ public StackTraceElement[] executionStackTrace() { return executionStackTrace; } - @Override - public void readFrom(StreamInput in) throws IOException { - watchId = in.readString(); - watchRecordId = in.readString(); - triggeredTime = Instant.ofEpochMilli(in.readVLong()).atZone(ZoneOffset.UTC); - executionTime = Instant.ofEpochMilli(in.readVLong()).atZone(ZoneOffset.UTC); - phase = ExecutionPhase.resolve(in.readString()); - int size = in.readVInt(); - executionStackTrace = new StackTraceElement[size]; - for (int i = 0; i < size; i++) { - String declaringClass = in.readString(); - String methodName = in.readString(); - String fileName = in.readOptionalString(); - int lineNumber = in.readInt(); - executionStackTrace[i] = new StackTraceElement(declaringClass, methodName, fileName, lineNumber); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(watchId); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchAction.java index a3c9bbb7351b0..f389af3b0db72 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchAction.java @@ -5,22 +5,17 @@ */ package org.elasticsearch.xpack.core.watcher.transport.actions.ack; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * This action acks a watch in memory, and the index */ -public class AckWatchAction extends StreamableResponseActionType { +public class AckWatchAction extends ActionType { public static final AckWatchAction INSTANCE = new AckWatchAction(); public static final String NAME = "cluster:admin/xpack/watcher/watch/ack"; 
private AckWatchAction() { - super(NAME); - } - - @Override - public AckWatchResponse newResponse() { - return new AckWatchResponse(); + super(NAME, AckWatchResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchRequest.java index d56bb5779e229..8864364c88830 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchRequest.java @@ -89,11 +89,6 @@ public ActionRequestValidationException validate() { return validationException; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public String toString() { StringBuilder sb = new StringBuilder("ack [").append(watchId).append("]"); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchResponse.java index 0ce1b78328bac..a0d075d06517b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchResponse.java @@ -21,7 +21,9 @@ public class AckWatchResponse extends ActionResponse { private WatchStatus status; - public AckWatchResponse() { + public AckWatchResponse(StreamInput in) throws IOException { + super(in); + status = in.readBoolean() ? new WatchStatus(in) : null; } public AckWatchResponse(@Nullable WatchStatus status) { @@ -35,12 +37,6 @@ public WatchStatus getStatus() { return status; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - status = in.readBoolean() ? 
new WatchStatus(in) : null; - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(status != null); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchAction.java index 4b57b95113cca..bf85166ba10a8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchAction.java @@ -5,22 +5,17 @@ */ package org.elasticsearch.xpack.core.watcher.transport.actions.activate; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * This action acks a watch in memory, and the index */ -public class ActivateWatchAction extends StreamableResponseActionType { +public class ActivateWatchAction extends ActionType { public static final ActivateWatchAction INSTANCE = new ActivateWatchAction(); public static final String NAME = "cluster:admin/xpack/watcher/watch/activate"; private ActivateWatchAction() { - super(NAME); - } - - @Override - public ActivateWatchResponse newResponse() { - return new ActivateWatchResponse(); + super(NAME, ActivateWatchResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchRequest.java index 326c951bedfac..0bfea44ba36c5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchRequest.java @@ -70,11 +70,6 @@ public ActionRequestValidationException validate() { return validationException; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public String toString() { return activate ? diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchResponse.java index 9fa5d43acc6f4..af9e705d7a44a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchResponse.java @@ -21,7 +21,9 @@ public class ActivateWatchResponse extends ActionResponse { private WatchStatus status; - public ActivateWatchResponse() { + public ActivateWatchResponse(StreamInput in) throws IOException { + super(in); + status = in.readBoolean() ? new WatchStatus(in) : null; } public ActivateWatchResponse(@Nullable WatchStatus status) { @@ -35,12 +37,6 @@ public WatchStatus getStatus() { return status; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - status = in.readBoolean() ? 
new WatchStatus(in) : null; - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(status != null); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/delete/DeleteWatchAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/delete/DeleteWatchAction.java index 00619966fc8f2..013506a92633b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/delete/DeleteWatchAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/delete/DeleteWatchAction.java @@ -5,23 +5,18 @@ */ package org.elasticsearch.xpack.core.watcher.transport.actions.delete; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse; /** * This action deletes an watch from in memory, the scheduler and the index */ -public class DeleteWatchAction extends StreamableResponseActionType { +public class DeleteWatchAction extends ActionType { public static final DeleteWatchAction INSTANCE = new DeleteWatchAction(); public static final String NAME = "cluster:admin/xpack/watcher/watch/delete"; private DeleteWatchAction() { - super(NAME); - } - - @Override - public DeleteWatchResponse newResponse() { - return new DeleteWatchResponse(); + super(NAME, DeleteWatchResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchAction.java index 2c5699b1132b1..9fe77c514fee0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchAction.java @@ -5,23 +5,18 @@ */ package org.elasticsearch.xpack.core.watcher.transport.actions.execute; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * This action executes a watch, either ignoring the schedule and condition or just the schedule and can execute a subset of the actions, * optionally persisting the history entry */ -public class ExecuteWatchAction extends StreamableResponseActionType { +public class ExecuteWatchAction extends ActionType { public static final ExecuteWatchAction INSTANCE = new ExecuteWatchAction(); public static final String NAME = "cluster:admin/xpack/watcher/watch/execute"; private ExecuteWatchAction() { - super(NAME); - } - - @Override - public ExecuteWatchResponse newResponse() { - return new ExecuteWatchResponse(); + super(NAME, ExecuteWatchResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchRequest.java index bece3e5a6f50a..d45a1a5c8b2b4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchRequest.java @@ -269,11 +269,6 @@ public ActionRequestValidationException validate() { return 
validationException; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public String toString() { return "execute[" + id + "]"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchResponse.java index 5b8f1a2227338..bf791cd805a9c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchResponse.java @@ -32,7 +32,10 @@ public class ExecuteWatchResponse extends ActionResponse implements ToXContentOb private String recordId; private XContentSource recordSource; - public ExecuteWatchResponse() { + public ExecuteWatchResponse(StreamInput in) throws IOException { + super(in); + recordId = in.readString(); + recordSource = XContentSource.readFrom(in); } public ExecuteWatchResponse(String recordId, BytesReference recordSource, XContentType contentType) { @@ -73,13 +76,6 @@ public XContentSource getRecordSource() { return recordSource; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - recordId = in.readString(); - recordSource = XContentSource.readFrom(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(recordId); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchAction.java index bef78b8e0e96b..3690c9cd715b7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchAction.java @@ -5,22 +5,17 @@ */ package org.elasticsearch.xpack.core.watcher.transport.actions.get; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * This action gets an watch by name */ -public class GetWatchAction extends StreamableResponseActionType { +public class GetWatchAction extends ActionType { public static final GetWatchAction INSTANCE = new GetWatchAction(); public static final String NAME = "cluster:monitor/xpack/watcher/watch/get"; private GetWatchAction() { - super(NAME); - } - - @Override - public GetWatchResponse newResponse() { - return new GetWatchResponse(); + super(NAME, GetWatchResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchRequest.java index a2e7dc171a750..ed1bc96ca8484 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchRequest.java @@ -67,11 +67,6 @@ public String getId() { return id; } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be 
replaced by Writeable"); - } - @Override public String toString() { return "get [" + id +"]"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchResponse.java index 769042a0df21d..ba54388527c14 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchResponse.java @@ -29,7 +29,23 @@ public class GetWatchResponse extends ActionResponse implements ToXContentObject private long seqNo; private long primaryTerm; - public GetWatchResponse() { + public GetWatchResponse(StreamInput in) throws IOException { + super(in); + id = in.readString(); + found = in.readBoolean(); + if (found) { + status = new WatchStatus(in); + source = XContentSource.readFrom(in); + version = in.readZLong(); + seqNo = in.readZLong(); + primaryTerm = in.readVLong(); + } else { + status = null; + source = null; + version = Versions.NOT_FOUND; + seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; + primaryTerm = SequenceNumbers.UNASSIGNED_PRIMARY_TERM; + } } /** @@ -86,26 +102,6 @@ public long getPrimaryTerm() { return primaryTerm; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - id = in.readString(); - found = in.readBoolean(); - if (found) { - status = new WatchStatus(in); - source = XContentSource.readFrom(in); - version = in.readZLong(); - seqNo = in.readZLong(); - primaryTerm = in.readVLong(); - } else { - status = null; - source = null; - version = Versions.NOT_FOUND; - seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; - primaryTerm = SequenceNumbers.UNASSIGNED_PRIMARY_TERM; - } - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(id); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/PutWatchAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/PutWatchAction.java index 379c8ff672888..6556a649bada4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/PutWatchAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/PutWatchAction.java @@ -5,23 +5,18 @@ */ package org.elasticsearch.xpack.core.watcher.transport.actions.put; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse; /** * This action puts an watch into the watch index and adds it to the scheduler */ -public class PutWatchAction extends StreamableResponseActionType { +public class PutWatchAction extends ActionType { public static final PutWatchAction INSTANCE = new PutWatchAction(); public static final String NAME = "cluster:admin/xpack/watcher/watch/put"; private PutWatchAction() { - super(NAME); - } - - @Override - public PutWatchResponse newResponse() { - return new PutWatchResponse(); + super(NAME, PutWatchResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/service/WatcherServiceAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/service/WatcherServiceAction.java index 80276946fd698..54c93b5097eaf 
100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/service/WatcherServiceAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/service/WatcherServiceAction.java @@ -7,7 +7,6 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.common.io.stream.Writeable; public class WatcherServiceAction extends ActionType { @@ -16,11 +15,6 @@ public class WatcherServiceAction extends ActionType { public static final String NAME = "cluster:admin/xpack/watcher/service"; private WatcherServiceAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return AcknowledgedResponse::new; + super(NAME, AcknowledgedResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/service/WatcherServiceRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/service/WatcherServiceRequest.java index 64fc7abc2ae1b..5c37de33d5cb5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/service/WatcherServiceRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/service/WatcherServiceRequest.java @@ -20,6 +20,11 @@ public enum Command { START, STOP } private Command command; + public WatcherServiceRequest(StreamInput in) throws IOException { + super(in); + command = Command.valueOf(in.readString().toUpperCase(Locale.ROOT)); + } + public WatcherServiceRequest() { } @@ -52,12 +57,6 @@ public ActionRequestValidationException validate() { } } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - command = Command.valueOf(in.readString().toUpperCase(Locale.ROOT)); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/stats/WatcherStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/stats/WatcherStatsAction.java index fbc55e0836c3e..36965aa451365 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/stats/WatcherStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/stats/WatcherStatsAction.java @@ -5,22 +5,17 @@ */ package org.elasticsearch.xpack.core.watcher.transport.actions.stats; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * This ActionType gets the stats for the watcher plugin */ -public class WatcherStatsAction extends StreamableResponseActionType { +public class WatcherStatsAction extends ActionType { public static final WatcherStatsAction INSTANCE = new WatcherStatsAction(); public static final String NAME = "cluster:monitor/xpack/watcher/stats/dist"; private WatcherStatsAction() { - super(NAME); - } - - @Override - public WatcherStatsResponse newResponse() { - return new WatcherStatsResponse(); + super(NAME, WatcherStatsResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/stats/WatcherStatsRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/stats/WatcherStatsRequest.java index 
38ebe8f9a742a..2f0f99ece4d45 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/stats/WatcherStatsRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/stats/WatcherStatsRequest.java @@ -23,6 +23,14 @@ public class WatcherStatsRequest extends BaseNodesRequest { private boolean includeStats; public WatcherStatsRequest() { + super((String[]) null); + } + + public WatcherStatsRequest(StreamInput in) throws IOException { + super(in); + includeCurrentWatches = in.readBoolean(); + includeQueuedWatches = in.readBoolean(); + includeStats = in.readBoolean(); } public boolean includeCurrentWatches() { @@ -54,14 +62,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - includeCurrentWatches = in.readBoolean(); - includeQueuedWatches = in.readBoolean(); - includeStats = in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -81,7 +81,12 @@ public static class Node extends BaseNodeRequest { private boolean includeQueuedWatches; private boolean includeStats; - public Node() {} + public Node(StreamInput in) throws IOException { + super(in); + includeCurrentWatches = in.readBoolean(); + includeQueuedWatches = in.readBoolean(); + includeStats = in.readBoolean(); + } public Node(WatcherStatsRequest request) { includeCurrentWatches = request.includeCurrentWatches(); @@ -101,14 +106,6 @@ public boolean includeStats() { return includeStats; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - includeCurrentWatches = in.readBoolean(); - includeQueuedWatches = in.readBoolean(); - includeStats = in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/stats/WatcherStatsResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/stats/WatcherStatsResponse.java index 13b3ddb60b290..c019c8658f0da 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/stats/WatcherStatsResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/stats/WatcherStatsResponse.java @@ -30,7 +30,9 @@ public class WatcherStatsResponse extends BaseNodesResponse readNodesFrom(StreamInput in) throws IOException { - return in.readList(Node::readNodeResponse); + return in.readList(Node::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeStreamableList(nodes); + out.writeList(nodes); } @Override @@ -96,7 +92,22 @@ public static class Node extends BaseNodeResponse implements ToXContentObject { private List queuedWatches; private Counters stats; - public Node() { + public Node(StreamInput in) throws IOException { + super(in); + watchesCount = in.readLong(); + threadPoolQueueSize = in.readLong(); + threadPoolMaxSize = in.readLong(); + watcherState = WatcherState.fromId(in.readByte()); + + if (in.readBoolean()) { + snapshots = in.readList(WatchExecutionSnapshot::new); + } + if (in.readBoolean()) { + queuedWatches = in.readList(QueuedWatch::new); + } + if (in.readBoolean()) { + stats = new Counters(in); + } } public Node(DiscoveryNode node) { @@ -173,25 +184,6 @@ public void 
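The WatcherStatsResponse.Node hunks above also show the wire idiom for nullable collections: a boolean presence flag, then the list via writeList/readList with a per-element Writeable.Reader, replacing the old readStreamableList/writeStreamableList pair. Roughly, with a hypothetical container and element type:

import java.io.IOException;
import java.util.List;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

public class NodeStats implements Writeable {
    private final List<Snapshot> snapshots; // may be null

    public NodeStats(StreamInput in) throws IOException {
        // presence flag first; payload only when the flag is true
        snapshots = in.readBoolean() ? in.readList(Snapshot::new) : null;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeBoolean(snapshots != null);
        if (snapshots != null) {
            out.writeList(snapshots); // each element serializes itself
        }
    }

    public static class Snapshot implements Writeable {
        private final String name;

        public Snapshot(StreamInput in) throws IOException {
            name = in.readString();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(name);
        }
    }
}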
setStats(Counters stats) { this.stats = stats; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - watchesCount = in.readLong(); - threadPoolQueueSize = in.readLong(); - threadPoolMaxSize = in.readLong(); - watcherState = WatcherState.fromId(in.readByte()); - - if (in.readBoolean()) { - snapshots = in.readStreamableList(WatchExecutionSnapshot::new); - } - if (in.readBoolean()) { - queuedWatches = in.readStreamableList(QueuedWatch::new); - } - if (in.readBoolean()) { - stats = Counters.read(in); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -202,11 +194,11 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(snapshots != null); if (snapshots != null) { - out.writeStreamableList(snapshots); + out.writeList(snapshots); } out.writeBoolean(queuedWatches != null); if (queuedWatches != null) { - out.writeStreamableList(queuedWatches); + out.writeList(queuedWatches); } out.writeBoolean(stats != null); if (stats != null) { @@ -247,12 +239,5 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) builder.endObject(); return builder; } - - static WatcherStatsResponse.Node readNodeResponse(StreamInput in) - throws IOException { - WatcherStatsResponse.Node node = new WatcherStatsResponse.Node(); - node.readFrom(in); - return node; - } } -} \ No newline at end of file +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/WatchStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/WatchStatus.java index 0f4361aa87b06..d065a1e89c931 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/WatchStatus.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/WatchStatus.java @@ -10,7 +10,6 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -37,7 +36,7 @@ import static org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils.writeDate; import static org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils.writeOptionalDate; -public class WatchStatus implements ToXContentObject, Streamable, Writeable { +public class WatchStatus implements ToXContentObject, Writeable { public static final String INCLUDE_STATE = "include_state"; @@ -239,11 +238,6 @@ public void writeTo(StreamOutput out) throws IOException { } } - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/x-pack/plugin/core/src/main/resources/slm-history-ilm-policy.json b/x-pack/plugin/core/src/main/resources/slm-history-ilm-policy.json new file mode 100644 index 0000000000000..8bccc4d23cb46 --- /dev/null +++ b/x-pack/plugin/core/src/main/resources/slm-history-ilm-policy.json @@ -0,0 +1,10 @@ +{ + "phases": { + "delete": { + "min_age": "60d", + "actions": { + "delete": {} + } + } + } +} diff --git a/x-pack/plugin/core/src/main/resources/slm-history.json 
b/x-pack/plugin/core/src/main/resources/slm-history.json new file mode 100644 index 0000000000000..762c398b2d9a2 --- /dev/null +++ b/x-pack/plugin/core/src/main/resources/slm-history.json @@ -0,0 +1,58 @@ +{ + "index_patterns": [ + ".slm-history-${xpack.slm.template.version}*" + ], + "order": 2147483647, + "settings": { + "index.number_of_shards": 1, + "index.number_of_replicas": 0, + "index.auto_expand_replicas": "0-1", + "index.lifecycle.name": "slm-history-ilm-policy", + "index.format": 1 + }, + "mappings": { + "_doc": { + "dynamic": false, + "properties": { + "@timestamp": { + "type": "date", + "format": "epoch_millis" + }, + "policy": { + "type": "keyword" + }, + "repository": { + "type": "keyword" + }, + "snapshot_name":{ + "type": "keyword" + }, + "operation": { + "type": "keyword" + }, + "success": { + "type": "boolean" + }, + "configuration": { + "type": "object", + "dynamic": false, + "properties": { + "indices": { + "type": "keyword" + }, + "partial": { + "type": "boolean" + }, + "include_global_state": { + "type": "boolean" + } + } + }, + "error_details": { + "type": "text", + "index": false + } + } + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/ReloadAnalyzersResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/ReloadAnalyzersResponseTests.java index 3a10700ca1d90..41a38799ae38d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/ReloadAnalyzersResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/ReloadAnalyzersResponseTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractBroadcastResponseTestCase; +import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.action.ReloadAnalyzersResponse.ReloadDetails; import java.io.IOException; @@ -53,4 +54,12 @@ public void testToXContent() { + "}", output); } + + public void testSerialization() throws IOException { + ReloadAnalyzersResponse response = createTestInstance(); + ReloadAnalyzersResponse copy = copyWriteable(response, writableRegistry(), ReloadAnalyzersResponse::new, + VersionUtils.randomVersion(random())); + assertEquals(response.getReloadDetails(), copy.getReloadDetails()); + } + } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/ReloadDetailsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/ReloadDetailsTests.java new file mode 100644 index 0000000000000..eb2fceb7f0ca6 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/ReloadDetailsTests.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
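The testSerialization added to ReloadAnalyzersResponseTests above is the standard round-trip check: serialize at a random version, deserialize with the class's own reader, and compare. copyWriteable and writableRegistry are ESTestCase helpers, used exactly as in the hunk. A generic version of that test, with MyResponse standing in for the concrete type (and assuming it implements equals()):

public void testSerialization() throws IOException {
    MyResponse original = createTestInstance();
    // round-trip through the wire format at a randomly chosen version
    MyResponse copy = copyWriteable(original, writableRegistry(), MyResponse::new,
            VersionUtils.randomVersion(random()));
    assertEquals(original, copy);
}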
+ */ + +package org.elasticsearch.xpack.core.action; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.core.action.ReloadAnalyzersResponse.ReloadDetails; + +import java.io.IOException; +import java.util.HashSet; +import java.util.Set; + +public class ReloadDetailsTests extends AbstractWireSerializingTestCase<ReloadDetails> { + + @Override + protected ReloadDetails createTestInstance() { + return new ReloadDetails(randomAlphaOfLengthBetween(5, 10), Set.of(generateRandomStringArray(5, 5, false)), + Set.of(generateRandomStringArray(5, 5, false))); + } + + @Override + protected Reader<ReloadDetails> instanceReader() { + return ReloadDetails::new; + } + + @Override + protected ReloadDetails mutateInstance(ReloadDetails instance) throws IOException { + String indexName = instance.getIndexName(); + Set<String> reloadedAnalyzers = new HashSet<>(instance.getReloadedAnalyzers()); + Set<String> reloadedIndicesNodes = new HashSet<>(instance.getReloadedIndicesNodes()); + int mutate = randomIntBetween(0, 2); + switch (mutate) { + case 0: + indexName = indexName + randomAlphaOfLength(2); + break; + case 1: + reloadedAnalyzers.add(randomAlphaOfLength(10)); + break; + case 2: + reloadedIndicesNodes.add(randomAlphaOfLength(10)); + break; + default: + throw new IllegalStateException("Requested to modify more than available parameters."); + } + return new ReloadDetails(indexName, reloadedIndicesNodes, reloadedAnalyzers); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/ReloadSynonymAnalyzerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/ReloadSynonymAnalyzerTests.java index 590e4bb982eb0..ef11e48586720 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/ReloadSynonymAnalyzerTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/ReloadSynonymAnalyzerTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.analysis.common.CommonAnalysisPlugin; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.mapper.MapperException; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -104,4 +105,38 @@ public void testSynonymsUpdateable() throws FileNotFoundException, IOException { response = client().prepareSearch(indexName).setQuery(QueryBuilders.matchQuery("field", "buzz")).get(); assertHitCount(response, 1L); } + + public void testUpdateableSynonymsRejectedAtIndexTime() throws FileNotFoundException, IOException { + String synonymsFileName = "synonyms.txt"; + Path configDir = node().getEnvironment().configFile(); + if (Files.exists(configDir) == false) { + Files.createDirectory(configDir); + } + Path synonymsFile = configDir.resolve(synonymsFileName); + if (Files.exists(synonymsFile) == false) { + Files.createFile(synonymsFile); + } + try (PrintWriter out = new PrintWriter( + new OutputStreamWriter(Files.newOutputStream(synonymsFile, StandardOpenOption.WRITE), StandardCharsets.UTF_8))) { + out.println("foo, baz"); + } + + final String indexName = "test"; + final String analyzerName = "my_synonym_analyzer"; + MapperException ex = expectThrows(MapperException.class, () -> client().admin().indices().prepareCreate(indexName) + .setSettings(Settings.builder() + .put("index.number_of_shards", 5) + .put("index.number_of_replicas", 0) +
.put("analysis.analyzer." + analyzerName + ".tokenizer", "standard") + .putList("analysis.analyzer." + analyzerName + ".filter", "lowercase", "my_synonym_filter") + .put("analysis.filter.my_synonym_filter.type", "synonym") + .put("analysis.filter.my_synonym_filter.updateable", "true") + .put("analysis.filter.my_synonym_filter.synonyms_path", synonymsFileName)) + .addMapping("_doc", "field", "type=text,analyzer=" + analyzerName).get()); + + assertEquals( + "Failed to parse mapping [_doc]: analyzer [my_synonym_analyzer] " + + "contains filters [my_synonym_filter] that are not allowed to run in all mode.", + ex.getMessage()); + } } \ No newline at end of file diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformActionRequestTests.java index 8c99114e5553c..e1b2c338acbb2 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformActionRequestTests.java @@ -13,7 +13,7 @@ public class DeleteDataFrameTransformActionRequestTests extends AbstractWireSerializingTestCase { @Override protected Request createTestInstance() { - return new Request(randomAlphaOfLengthBetween(1, 20)); + return new Request(randomAlphaOfLengthBetween(1, 20), randomBoolean()); } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformActionRequestTests.java index ea6f2a47f4692..5eaf955249214 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformActionRequestTests.java @@ -44,6 +44,7 @@ protected Request createTestInstance() { "transform-preview", randomSourceConfig(), new DestConfig("unused-transform-preview-index", null), + null, randomBoolean() ? 
DataFrameTransformConfigTests.randomSyncConfig() : null, null, PivotConfigTests.randomPivotConfig(), diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformActionRequestTests.java index 94d4b2e20cd43..aef9844e5f502 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformActionRequestTests.java @@ -6,16 +6,24 @@ package org.elasticsearch.xpack.core.dataframe.action; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.action.PutDataFrameTransformAction.Request; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfigTests; +import org.elasticsearch.xpack.core.dataframe.transforms.SyncConfig; +import org.elasticsearch.xpack.core.dataframe.transforms.TimeSyncConfig; import org.junit.Before; -import java.io.IOException; +import java.util.List; -public class PutDataFrameTransformActionRequestTests extends AbstractSerializingDataFrameTestCase { +import static java.util.Collections.emptyList; + +public class PutDataFrameTransformActionRequestTests extends AbstractWireSerializingTestCase { private String transformId; @Before @@ -23,24 +31,24 @@ public void setupTransformId() { transformId = randomAlphaOfLengthBetween(1, 10); } - @Override - protected Request doParseInstance(XContentParser parser) throws IOException { - return Request.fromXContent(parser, transformId); - } - @Override protected Writeable.Reader instanceReader() { return Request::new; } @Override - protected boolean supportsUnknownFields() { - return false; + protected Request createTestInstance() { + DataFrameTransformConfig config = DataFrameTransformConfigTests.randomDataFrameTransformConfigWithoutHeaders(transformId); + return new Request(config, randomBoolean()); } @Override - protected Request createTestInstance() { - DataFrameTransformConfig config = DataFrameTransformConfigTests.randomDataFrameTransformConfigWithoutHeaders(transformId); - return new Request(config); + protected NamedWriteableRegistry getNamedWriteableRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, emptyList()); + + List namedWriteables = searchModule.getNamedWriteables(); + namedWriteables.add(new NamedWriteableRegistry.Entry(SyncConfig.class, DataFrameField.TIME_BASED_SYNC.getPreferredName(), + TimeSyncConfig::new)); + return new NamedWriteableRegistry(namedWriteables); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerPositionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerPositionTests.java new file mode 100644 index 0000000000000..dd57a0302a499 --- /dev/null +++ 
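PutDataFrameTransformActionRequestTests above needs a custom registry because the request embeds a polymorphic SyncConfig: a NamedWriteable is written as a name plus payload, so the reading side must map that name back to a reader. A sketch of the override inside such a test class, where MySyncConfig and the "time" name are illustrative stand-ins for whatever writeTo() actually emits:

@Override
protected NamedWriteableRegistry getNamedWriteableRegistry() {
    List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
    // register one reader per concrete implementation, keyed by its wire name
    entries.add(new NamedWriteableRegistry.Entry(SyncConfig.class, "time", MySyncConfig::new));
    return new NamedWriteableRegistry(entries);
}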
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerPositionTests.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.dataframe.transforms; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Predicate; + +public class DataFrameIndexerPositionTests extends AbstractSerializingTestCase { + + public static DataFrameIndexerPosition randomDataFrameIndexerPosition() { + return new DataFrameIndexerPosition(randomPosition(), randomPosition()); + } + + @Override + protected DataFrameIndexerPosition createTestInstance() { + return randomDataFrameIndexerPosition(); + } + + @Override + protected Reader instanceReader() { + return DataFrameIndexerPosition::new; + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + return field -> !field.isEmpty(); + } + + @Override + protected DataFrameIndexerPosition doParseInstance(XContentParser parser) throws IOException { + return DataFrameIndexerPosition.fromXContent(parser); + } + + private static Map randomPosition() { + if (randomBoolean()) { + return null; + } + int numFields = randomIntBetween(1, 5); + Map position = new HashMap<>(); + for (int i = 0; i < numFields; i++) { + Object value; + if (randomBoolean()) { + value = randomLong(); + } else { + value = randomAlphaOfLengthBetween(1, 10); + } + position.put(randomAlphaOfLengthBetween(3, 10), value); + } + + return position; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointTests.java index ad4f068870ba6..67cc4b91584c2 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointTests.java @@ -162,9 +162,10 @@ public void testGetBehind() { checkpointsByIndexNew.remove(checkpointsByIndexNew.firstKey()); assertEquals((indices - 1) * shards * 10L, DataFrameTransformCheckpoint.getBehind(checkpointOld, checkpointTransientNew)); - // remove 1st index from new, now old has 1 index more, behind can not be calculated + // remove 1st index from new, now old has 1 index more, which should be ignored checkpointsByIndexNew.remove(checkpointsByIndexNew.firstKey()); - assertEquals(-1L, DataFrameTransformCheckpoint.getBehind(checkpointOld, checkpointTransientNew)); + + assertEquals((indices - 2) * shards * 10L, DataFrameTransformCheckpoint.getBehind(checkpointOld, checkpointTransientNew)); } private static Map randomCheckpointsByIndex() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfigTests.java index 
dd5b5c9ff8841..849ff1629c755 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfigTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -46,6 +47,7 @@ public static DataFrameTransformConfig randomDataFrameTransformConfigWithoutHead return new DataFrameTransformConfig(id, randomSourceConfig(), randomDestConfig(), + randomBoolean() ? null : TimeValue.timeValueMillis(randomIntBetween(1_000, 3_600_000)), randomBoolean() ? null : randomSyncConfig(), null, PivotConfigTests.randomPivotConfig(), @@ -58,6 +60,7 @@ public static DataFrameTransformConfig randomDataFrameTransformConfig(String id) return new DataFrameTransformConfig(id, randomSourceConfig(), randomDestConfig(), + randomBoolean() ? null : TimeValue.timeValueMillis(randomIntBetween(1_000, 3_600_000)), randomBoolean() ? null : randomSyncConfig(), randomHeaders(), PivotConfigTests.randomPivotConfig(), @@ -69,11 +72,11 @@ public static DataFrameTransformConfig randomDataFrameTransformConfig(String id) public static DataFrameTransformConfig randomInvalidDataFrameTransformConfig() { if (randomBoolean()) { return new DataFrameTransformConfig(randomAlphaOfLengthBetween(1, 10), randomInvalidSourceConfig(), randomDestConfig(), - randomBoolean() ? randomSyncConfig() : null, randomHeaders(), PivotConfigTests.randomPivotConfig(), + null, randomBoolean() ? randomSyncConfig() : null, randomHeaders(), PivotConfigTests.randomPivotConfig(), randomBoolean() ? null : randomAlphaOfLengthBetween(1, 1000)); } // else return new DataFrameTransformConfig(randomAlphaOfLengthBetween(1, 10), randomSourceConfig(), randomDestConfig(), - randomBoolean() ? randomSyncConfig() : null, randomHeaders(), PivotConfigTests.randomInvalidPivotConfig(), + null, randomBoolean() ? randomSyncConfig() : null, randomHeaders(), PivotConfigTests.randomInvalidPivotConfig(), randomBoolean() ? 
null : randomAlphaOfLengthBetween(1, 1000)); } @@ -146,7 +149,7 @@ public void testDefaultMatchAll() throws IOException { } } - public void testPreventHeaderInjection() throws IOException { + public void testPreventHeaderInjection() { String pivotTransform = "{" + " \"headers\" : {\"key\" : \"value\" }," + " \"source\" : {\"index\":\"src\"}," @@ -167,7 +170,7 @@ public void testPreventHeaderInjection() throws IOException { () -> createDataFrameTransformConfigFromString(pivotTransform, "test_header_injection")); } - public void testPreventCreateTimeInjection() throws IOException { + public void testPreventCreateTimeInjection() { String pivotTransform = "{" + " \"create_time\" : " + Instant.now().toEpochMilli() + " }," + " \"source\" : {\"index\":\"src\"}," @@ -188,7 +191,7 @@ public void testPreventCreateTimeInjection() throws IOException { () -> createDataFrameTransformConfigFromString(pivotTransform, "test_createTime_injection")); } - public void testPreventVersionInjection() throws IOException { + public void testPreventVersionInjection() { String pivotTransform = "{" + " \"version\" : \"7.3.0\"," + " \"source\" : {\"index\":\"src\"}," @@ -229,11 +232,11 @@ public void testXContentForInternalStorage() throws IOException { public void testMaxLengthDescription() { IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> new DataFrameTransformConfig("id", - randomSourceConfig(), randomDestConfig(), null, null, PivotConfigTests.randomPivotConfig(), randomAlphaOfLength(1001))); + randomSourceConfig(), randomDestConfig(), null, null, null, PivotConfigTests.randomPivotConfig(), randomAlphaOfLength(1001))); assertThat(exception.getMessage(), equalTo("[description] must be less than 1000 characters in length.")); String description = randomAlphaOfLength(1000); DataFrameTransformConfig config = new DataFrameTransformConfig("id", - randomSourceConfig(), randomDestConfig(), null, null, PivotConfigTests.randomPivotConfig(), description); + randomSourceConfig(), randomDestConfig(), null, null, null, PivotConfigTests.randomPivotConfig(), description); assertThat(description, equalTo(config.getDescription())); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStateTests.java index 9f4ac546c89a4..cc6fe88e5b273 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStateTests.java @@ -12,8 +12,6 @@ import org.elasticsearch.xpack.core.indexing.IndexerState; import java.io.IOException; -import java.util.HashMap; -import java.util.Map; import java.util.function.Predicate; import static org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformProgressTests.randomDataFrameTransformProgress; @@ -24,7 +22,7 @@ public class DataFrameTransformStateTests extends AbstractSerializingTestCase instanceReader() { return DataFrameTransformState::new; } - private static Map randomPosition() { - if (randomBoolean()) { - return null; - } - int numFields = randomIntBetween(1, 5); - Map position = new HashMap<>(); - for (int i = 0; i < numFields; i++) { - Object value; - if (randomBoolean()) { - value = randomLong(); - } else { - value = randomAlphaOfLengthBetween(1, 10); - } - position.put(randomAlphaOfLengthBetween(3, 10), 
value); - } - return position; - } - @Override protected boolean supportsUnknownFields() { return true; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformTests.java index d7463a6df7139..ce830240c63f9 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -25,7 +26,8 @@ protected DataFrameTransform doParseInstance(XContentParser parser) throws IOExc @Override protected DataFrameTransform createTestInstance() { - return new DataFrameTransform(randomAlphaOfLength(10), randomBoolean() ? null : Version.CURRENT); + return new DataFrameTransform(randomAlphaOfLength(10), randomBoolean() ? null : Version.CURRENT, + randomBoolean() ? null : TimeValue.timeValueMillis(randomIntBetween(1_000, 3_600_000))); } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/TimeSyncConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/TimeSyncConfigTests.java index 763e13e77aee0..f6775712c127e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/TimeSyncConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/TimeSyncConfigTests.java @@ -14,6 +14,8 @@ import java.io.IOException; +import static org.hamcrest.Matchers.equalTo; + public class TimeSyncConfigTests extends AbstractSerializingTestCase { public static TimeSyncConfig randomTimeSyncConfig() { @@ -35,4 +37,8 @@ protected Reader instanceReader() { return TimeSyncConfig::new; } + public void testDefaultDelay() { + TimeSyncConfig config = new TimeSyncConfig(randomAlphaOfLength(10), null); + assertThat(config.getDelay(), equalTo(TimeSyncConfig.DEFAULT_DELAY)); + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoActionResponseTests.java index 94661819e5008..c33ea2fd7f2cb 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoActionResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoActionResponseTests.java @@ -14,12 +14,13 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.test.AbstractStreamableTestCase; +import 
org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfigTests; @@ -37,7 +38,7 @@ import static org.hamcrest.Matchers.empty; import static org.hamcrest.core.IsEqual.equalTo; -public class DeprecationInfoActionResponseTests extends AbstractStreamableTestCase { +public class DeprecationInfoActionResponseTests extends AbstractWireSerializingTestCase { @Override protected DeprecationInfoAction.Response createTestInstance() { @@ -57,8 +58,8 @@ protected DeprecationInfoAction.Response createTestInstance() { } @Override - protected DeprecationInfoAction.Response createBlankInstance() { - return new DeprecationInfoAction.Response(); + protected Writeable.Reader instanceReader() { + return DeprecationInfoAction.Response::new; } public void testFrom() throws IOException { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/deprecation/NodesDeprecationCheckRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/deprecation/NodesDeprecationCheckRequestTests.java index 8dd7255a7f15a..0420a8e70f474 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/deprecation/NodesDeprecationCheckRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/deprecation/NodesDeprecationCheckRequestTests.java @@ -6,16 +6,17 @@ package org.elasticsearch.xpack.core.deprecation; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; import java.io.IOException; public class NodesDeprecationCheckRequestTests - extends AbstractStreamableTestCase { + extends AbstractWireSerializingTestCase { @Override - protected NodesDeprecationCheckRequest createBlankInstance() { - return new NodesDeprecationCheckRequest(); + protected Writeable.Reader instanceReader() { + return NodesDeprecationCheckRequest::new; } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/deprecation/NodesDeprecationCheckResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/deprecation/NodesDeprecationCheckResponseTests.java index 143c0e2f5ad50..8465c68e2d0d7 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/deprecation/NodesDeprecationCheckResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/deprecation/NodesDeprecationCheckResponseTests.java @@ -10,8 +10,9 @@ import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.test.AbstractWireSerializingTestCase; import java.io.IOException; import java.net.InetAddress; @@ -21,11 +22,11 @@ import java.util.List; public class NodesDeprecationCheckResponseTests - extends AbstractStreamableTestCase { + extends AbstractWireSerializingTestCase { @Override - protected NodesDeprecationCheckResponse createBlankInstance() { - return new NodesDeprecationCheckResponse(); + protected Writeable.Reader instanceReader() { + return NodesDeprecationCheckResponse::new; } @Override diff --git 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/frozen/FrozenIndicesFeatureSetUsageTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/frozen/FrozenIndicesFeatureSetUsageTests.java new file mode 100644 index 0000000000000..34f7757fa2ff3 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/frozen/FrozenIndicesFeatureSetUsageTests.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.frozen; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import java.io.IOException; + +public class FrozenIndicesFeatureSetUsageTests extends AbstractWireSerializingTestCase { + + @Override + protected FrozenIndicesFeatureSetUsage createTestInstance() { + boolean available = randomBoolean(); + boolean enabled = randomBoolean(); + return new FrozenIndicesFeatureSetUsage(available, enabled, randomIntBetween(0, 100000)); + } + + @Override + protected FrozenIndicesFeatureSetUsage mutateInstance(FrozenIndicesFeatureSetUsage instance) throws IOException { + boolean available = instance.available(); + boolean enabled = instance.enabled(); + int numFrozenIndices = instance.getNumberOfFrozenIndices(); + switch (between(0, 2)) { + case 0: + available = available == false; + break; + case 1: + enabled = enabled == false; + break; + case 2: + numFrozenIndices = randomValueOtherThan(numFrozenIndices, () -> randomIntBetween(0, 100000)); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new FrozenIndicesFeatureSetUsage(available, enabled, numFrozenIndices); + } + + @Override + protected Writeable.Reader instanceReader() { + return FrozenIndicesFeatureSetUsage::new; + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java index 053e41d9b2a63..1593dfbb7551e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java @@ -8,8 +8,10 @@ import org.apache.lucene.search.TotalHits; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchResponseSections; @@ -146,6 +148,100 @@ public int getStep() { } + private class MockIndexerFiveRuns extends AsyncTwoPhaseIndexer { + + // counters + private volatile boolean started = false; + private volatile int searchRequests = 0; + private volatile int searchOps = 0; + private volatile int processOps = 0; + private volatile int bulkOps = 0; + + protected MockIndexerFiveRuns(Executor executor, AtomicReference initialState, Integer initialPosition) { + super(executor, initialState, initialPosition, new MockJobStats()); + } + + @Override + protected String getJobId() { + return "mock_5_runs"; + 
} + + @Override + protected IterationResult doProcess(SearchResponse searchResponse) { + ++processOps; + if (processOps == 5) { + return new IterationResult<>(Collections.singletonList(new IndexRequest()), processOps, true); + } + else if (processOps % 2 == 0) { + return new IterationResult<>(Collections.emptyList(), processOps, false); + } + + return new IterationResult<>(Collections.singletonList(new IndexRequest()), processOps, false); + } + + @Override + protected SearchRequest buildSearchRequest() { + ++searchRequests; + return new SearchRequest(); + } + + @Override + protected void onStart(long now, ActionListener listener) { + started = true; + listener.onResponse(null); + } + + @Override + protected void doNextSearch(SearchRequest request, ActionListener nextPhase) { + ++searchOps; + final SearchResponseSections sections = new SearchResponseSections( + new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), 0), null, + null, false, null, null, 1); + + nextPhase.onResponse(new SearchResponse(sections, null, 1, 1, 0, 0, ShardSearchFailure.EMPTY_ARRAY, null)); + } + + @Override + protected void doNextBulk(BulkRequest request, ActionListener nextPhase) { + ++bulkOps; + nextPhase.onResponse(new BulkResponse(new BulkItemResponse[0], 100)); + } + + @Override + protected void doSaveState(IndexerState state, Integer position, Runnable next) { + next.run(); + } + + @Override + protected void onFailure(Exception exc) { + fail(exc.getMessage()); + } + + @Override + protected void onFinish(ActionListener listener) { + assertTrue(isFinished.compareAndSet(false, true)); + listener.onResponse(null); + } + + @Override + protected void onStop() { + assertTrue(isStopped.compareAndSet(false, true)); + } + + @Override + protected void onAbort() { + } + + public void assertCounters() { + assertTrue(started); + assertEquals(5L, searchRequests); + assertEquals(5L, searchOps); + assertEquals(5L, processOps); + assertEquals(2L, bulkOps); + } + + } + private class MockIndexerThrowsFirstSearch extends AsyncTwoPhaseIndexer { // test the execution order @@ -288,4 +384,20 @@ public void testStop_WhileIndexing() throws InterruptedException { executor.shutdownNow(); } } + + public void testFiveRuns() throws InterruptedException { + AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); + final ExecutorService executor = Executors.newFixedThreadPool(1); + try { + MockIndexerFiveRuns indexer = new MockIndexerFiveRuns (executor, state, 2); + indexer.start(); + assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); + assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); + assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); + assertTrue(awaitBusy(() -> isFinished.get())); + indexer.assertCounters(); + } finally { + executor.shutdownNow(); + } + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleResponseTests.java index cb4a1924f3b2b..aab38c7ebd44d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleResponseTests.java @@ -9,9 +9,10 @@ import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import 
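The testFiveRuns case above follows the house pattern for driving an AsyncTwoPhaseIndexer in a unit test: walk the state machine from STOPPED to STARTED, trigger one job, wait for completion on a background executor, then assert the per-phase counters. The skeleton, assuming a MockIndexer built like the ones in this file (awaitBusy and the isFinished flag come from the surrounding test class):

AtomicReference<IndexerState> state = new AtomicReference<>(IndexerState.STOPPED);
final ExecutorService executor = Executors.newFixedThreadPool(1);
try {
    MockIndexer indexer = new MockIndexer(executor, state, 2);
    indexer.start();                                   // STOPPED -> STARTED
    assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis()));
    assertThat(indexer.getState(), equalTo(IndexerState.INDEXING));
    assertTrue(awaitBusy(() -> isFinished.get()));     // wait for onFinish()
    indexer.assertCounters();
} finally {
    executor.shutdownNow();                            // never leak the worker thread
}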
org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.AbstractSerializingTestCase; import java.io.IOException; import java.util.ArrayList; @@ -20,7 +21,7 @@ import java.util.List; import java.util.Map; -public class ExplainLifecycleResponseTests extends AbstractStreamableXContentTestCase { +public class ExplainLifecycleResponseTests extends AbstractSerializingTestCase { @Override protected ExplainLifecycleResponse createTestInstance() { @@ -33,8 +34,8 @@ protected ExplainLifecycleResponse createTestInstance() { } @Override - protected ExplainLifecycleResponse createBlankInstance() { - return new ExplainLifecycleResponse(); + protected Writeable.Reader instanceReader() { + return ExplainLifecycleResponse::new; } @Override @@ -55,6 +56,11 @@ protected boolean supportsUnknownFields() { return false; } + @Override + protected boolean assertToXContentEquivalence() { + return false; + } + protected NamedWriteableRegistry getNamedWriteableRegistry() { return new NamedWriteableRegistry(Arrays .asList(new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new))); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/FreezeStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/FreezeStepTests.java index 0198ed7abee7c..5f71604d24ea1 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/FreezeStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/FreezeStepTests.java @@ -14,7 +14,8 @@ import org.elasticsearch.client.Client; import org.elasticsearch.client.IndicesAdminClient; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.xpack.core.action.TransportFreezeIndexAction; +import org.elasticsearch.protocol.xpack.frozen.FreezeRequest; +import org.elasticsearch.xpack.core.frozen.action.FreezeIndexAction; import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; import org.junit.Before; import org.mockito.Mockito; @@ -78,8 +79,8 @@ public void testFreeze() { Mockito.when(client.admin()).thenReturn(adminClient); Mockito.when(adminClient.indices()).thenReturn(indicesClient); Mockito.doAnswer(invocation -> { - assertSame(invocation.getArguments()[0], TransportFreezeIndexAction.FreezeIndexAction.INSTANCE); - TransportFreezeIndexAction.FreezeRequest request = (TransportFreezeIndexAction.FreezeRequest) invocation.getArguments()[1]; + assertSame(invocation.getArguments()[0], FreezeIndexAction.INSTANCE); + FreezeRequest request = (FreezeRequest) invocation.getArguments()[1]; @SuppressWarnings("unchecked") ActionListener listener = (ActionListener) invocation.getArguments()[2]; assertNotNull(request); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleExplainResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleExplainResponseTests.java index 4b483dcf03945..4654a713eb467 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleExplainResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleExplainResponseTests.java @@ -93,6 +93,11 @@ protected IndexLifecycleExplainResponse 
doParseInstance(XContentParser parser) t return IndexLifecycleExplainResponse.PARSER.apply(parser, null); } + @Override + protected boolean assertToXContentEquivalence() { + return false; + } + @Override protected IndexLifecycleExplainResponse mutateInstance(IndexLifecycleExplainResponse instance) throws IOException { String index = instance.getIndex(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/StartILMRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/StartILMRequestTests.java index 4c61f3016a13e..54d5f96f4ecaa 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/StartILMRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/StartILMRequestTests.java @@ -6,18 +6,19 @@ package org.elasticsearch.xpack.core.indexlifecycle; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; -public class StartILMRequestTests extends AbstractStreamableTestCase { +public class StartILMRequestTests extends AbstractWireSerializingTestCase { @Override - protected StartILMRequest createBlankInstance() { + protected StartILMRequest createTestInstance() { return new StartILMRequest(); } @Override - protected StartILMRequest createTestInstance() { - return new StartILMRequest(); + protected Writeable.Reader instanceReader() { + return StartILMRequest::new; } public void testValidate() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/StopILMRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/StopILMRequestTests.java index be603ee33acc1..526965d9adbcd 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/StopILMRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/StopILMRequestTests.java @@ -6,18 +6,19 @@ package org.elasticsearch.xpack.core.indexlifecycle; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; -public class StopILMRequestTests extends AbstractStreamableTestCase { +public class StopILMRequestTests extends AbstractWireSerializingTestCase { @Override - protected StopILMRequest createBlankInstance() { + protected StopILMRequest createTestInstance() { return new StopILMRequest(); } @Override - protected StopILMRequest createTestInstance() { - return new StopILMRequest(); + protected Writeable.Reader instanceReader() { + return StopILMRequest::new; } public void testValidate() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/DeleteLifecycleRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/DeleteLifecycleRequestTests.java index c864bd76eeac5..4afe0260abddb 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/DeleteLifecycleRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/DeleteLifecycleRequestTests.java @@ -5,10 +5,11 @@ */ package org.elasticsearch.xpack.core.indexlifecycle.action; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.common.io.stream.Writeable; +import 
org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xpack.core.indexlifecycle.action.DeleteLifecycleAction.Request; -public class DeleteLifecycleRequestTests extends AbstractStreamableTestCase { +public class DeleteLifecycleRequestTests extends AbstractWireSerializingTestCase { @Override protected Request createTestInstance() { @@ -16,8 +17,8 @@ protected Request createTestInstance() { } @Override - protected Request createBlankInstance() { - return new Request(); + protected Writeable.Reader instanceReader() { + return Request::new; } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleRequestTests.java index 49caa0b48894e..976652f29c060 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleRequestTests.java @@ -5,12 +5,13 @@ */ package org.elasticsearch.xpack.core.indexlifecycle.action; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xpack.core.indexlifecycle.action.GetLifecycleAction.Request; import java.util.Arrays; -public class GetLifecycleRequestTests extends AbstractStreamableTestCase { +public class GetLifecycleRequestTests extends AbstractWireSerializingTestCase { @Override protected Request createTestInstance() { @@ -18,8 +19,8 @@ protected Request createTestInstance() { } @Override - protected Request createBlankInstance() { - return new Request(); + protected Writeable.Reader instanceReader() { + return Request::new; } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleResponseTests.java index 08688407b3db6..a63b4f03eaaf7 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleResponseTests.java @@ -6,7 +6,8 @@ package org.elasticsearch.xpack.core.indexlifecycle.action; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction; import org.elasticsearch.xpack.core.indexlifecycle.LifecycleType; import org.elasticsearch.xpack.core.indexlifecycle.MockAction; @@ -20,7 +21,7 @@ import static org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyTests.randomTestLifecyclePolicy; -public class GetLifecycleResponseTests extends AbstractStreamableTestCase { +public class GetLifecycleResponseTests extends AbstractWireSerializingTestCase { @Override protected Response createTestInstance() { @@ -34,8 +35,8 @@ protected Response createTestInstance() { } @Override - protected Response createBlankInstance() { - return new Response(); + protected Writeable.Reader instanceReader() { + return Response::new; } protected NamedWriteableRegistry 
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleResponseTests.java
index 08688407b3db6..a63b4f03eaaf7 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleResponseTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleResponseTests.java
@@ -6,7 +6,8 @@
 package org.elasticsearch.xpack.core.indexlifecycle.action;

 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
-import org.elasticsearch.test.AbstractStreamableTestCase;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
 import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction;
 import org.elasticsearch.xpack.core.indexlifecycle.LifecycleType;
 import org.elasticsearch.xpack.core.indexlifecycle.MockAction;
@@ -20,7 +21,7 @@
 import static org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyTests.randomTestLifecyclePolicy;

-public class GetLifecycleResponseTests extends AbstractStreamableTestCase<Response> {
+public class GetLifecycleResponseTests extends AbstractWireSerializingTestCase<Response> {

     @Override
     protected Response createTestInstance() {
@@ -34,8 +35,8 @@ protected Response createTestInstance() {
     }

     @Override
-    protected Response createBlankInstance() {
-        return new Response();
+    protected Writeable.Reader<Response> instanceReader() {
+        return Response::new;
     }

     protected NamedWriteableRegistry getNamedWriteableRegistry() {
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/MoveToStepRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/MoveToStepRequestTests.java
index 84b966b402323..b79144a7b3f7a 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/MoveToStepRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/MoveToStepRequestTests.java
@@ -6,14 +6,15 @@
  */
 package org.elasticsearch.xpack.core.indexlifecycle.action;

+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.test.AbstractStreamableXContentTestCase;
+import org.elasticsearch.test.AbstractSerializingTestCase;
 import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;
 import org.elasticsearch.xpack.core.indexlifecycle.StepKeyTests;
 import org.elasticsearch.xpack.core.indexlifecycle.action.MoveToStepAction.Request;
 import org.junit.Before;

-public class MoveToStepRequestTests extends AbstractStreamableXContentTestCase<Request> {
+public class MoveToStepRequestTests extends AbstractSerializingTestCase<Request> {

     private String index;
     private static final StepKeyTests stepKeyTests = new StepKeyTests();
@@ -29,8 +30,8 @@ protected Request createTestInstance() {
     }

     @Override
-    protected Request createBlankInstance() {
-        return new Request();
+    protected Writeable.Reader<Request> instanceReader() {
+        return Request::new;
     }

     @Override
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/PutLifecycleRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/PutLifecycleRequestTests.java
index 2c59d9ca5782a..745a3aad46815 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/PutLifecycleRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/PutLifecycleRequestTests.java
@@ -8,9 +8,10 @@
 import org.elasticsearch.cluster.ClusterModule;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.test.AbstractStreamableXContentTestCase;
+import org.elasticsearch.test.AbstractSerializingTestCase;
 import org.elasticsearch.xpack.core.indexlifecycle.AllocateAction;
 import org.elasticsearch.xpack.core.indexlifecycle.DeleteAction;
 import org.elasticsearch.xpack.core.indexlifecycle.ForceMergeAction;
@@ -32,7 +33,7 @@
 import java.util.Arrays;
 import java.util.List;

-public class PutLifecycleRequestTests extends AbstractStreamableXContentTestCase<Request> {
+public class PutLifecycleRequestTests extends AbstractSerializingTestCase<Request> {

     private String lifecycleName;
@@ -47,8 +48,8 @@ protected Request createTestInstance() {
     }

     @Override
-    protected Request createBlankInstance() {
-        return new Request();
+    protected Writeable.Reader<Request> instanceReader() {
+        return Request::new;
     }

     @Override
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyRequestTests.java
index e82f84564cae7..eeeba87a6e6d2 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyRequestTests.java
@@ -7,13 +7,14 @@
 package org.elasticsearch.xpack.core.indexlifecycle.action;

 import org.elasticsearch.action.support.IndicesOptions;
-import org.elasticsearch.test.AbstractStreamableTestCase;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
 import org.elasticsearch.xpack.core.indexlifecycle.action.RemoveIndexLifecyclePolicyAction.Request;

 import java.io.IOException;
 import java.util.Arrays;

-public class RemoveIndexLifecyclePolicyRequestTests extends AbstractStreamableTestCase<Request> {
+public class RemoveIndexLifecyclePolicyRequestTests extends AbstractWireSerializingTestCase<Request> {

     @Override
     protected Request createTestInstance() {
@@ -30,10 +31,10 @@ protected Request createTestInstance() {
     }

     @Override
-    protected Request createBlankInstance() {
-        return new Request();
+    protected Writeable.Reader<Request> instanceReader() {
+        return Request::new;
     }
-
+
     @Override
     protected Request mutateInstance(Request instance) throws IOException {
         String[] indices = instance.indices();
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyResponseTests.java
index a394e593e7307..eec29d02a6cb7 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyResponseTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyResponseTests.java
@@ -6,8 +6,9 @@
 package org.elasticsearch.xpack.core.indexlifecycle.action;

+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.test.AbstractStreamableXContentTestCase;
+import org.elasticsearch.test.AbstractSerializingTestCase;
 import org.elasticsearch.xpack.core.indexlifecycle.action.RemoveIndexLifecyclePolicyAction.Response;

 import java.io.IOException;
@@ -16,12 +17,7 @@
 import java.util.Collections;
 import java.util.List;

-public class RemoveIndexLifecyclePolicyResponseTests extends AbstractStreamableXContentTestCase<Response> {
-
-    @Override
-    protected Response createBlankInstance() {
-        return new Response();
-    }
+public class RemoveIndexLifecyclePolicyResponseTests extends AbstractSerializingTestCase<Response> {

     @Override
     protected Response createTestInstance() {
@@ -29,6 +25,11 @@ protected Response createTestInstance() {
         return new Response(failedIndexes);
     }

+    @Override
+    protected Writeable.Reader<Response> instanceReader() {
+        return Response::new;
+    }
+
     @Override
     protected Response mutateInstance(Response instance) throws IOException {
         List<String> failedIndices = randomValueOtherThan(instance.getFailedIndexes(),
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/RetryRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/RetryRequestTests.java
index 734bcf0b7df36..757571f59312e 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/RetryRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/RetryRequestTests.java
@@ -7,13 +7,14 @@
 package org.elasticsearch.xpack.core.indexlifecycle.action;

 import org.elasticsearch.action.support.IndicesOptions;
-import org.elasticsearch.test.AbstractStreamableTestCase;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
 import org.elasticsearch.xpack.core.indexlifecycle.action.RetryAction.Request;

 import java.io.IOException;
 import java.util.Arrays;

-public class RetryRequestTests extends AbstractStreamableTestCase<Request> {
+public class RetryRequestTests extends AbstractWireSerializingTestCase<Request> {

     @Override
     protected Request createTestInstance() {
@@ -29,6 +30,11 @@ protected Request createTestInstance() {
         return request;
     }

+    @Override
+    protected Writeable.Reader<Request> instanceReader() {
+        return Request::new;
+    }
+
     @Override
     protected Request mutateInstance(Request instance) throws IOException {
         String[] indices = instance.indices();
@@ -50,9 +56,4 @@ protected Request mutateInstance(Request instance) throws IOException {
         newRequest.indicesOptions(indicesOptions);
         return newRequest;
     }
-
-    @Override
-    protected Request createBlankInstance() {
-        return new Request();
-    }
 }
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarEventActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarEventActionRequestTests.java
index e6f35c76b788a..6c0afaa8cdb1b 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarEventActionRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarEventActionRequestTests.java
@@ -5,10 +5,11 @@
  */
 package org.elasticsearch.xpack.core.ml.action;

-import org.elasticsearch.test.AbstractStreamableTestCase;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
 import org.elasticsearch.xpack.core.ml.action.DeleteCalendarEventAction.Request;

-public class DeleteCalendarEventActionRequestTests extends AbstractStreamableTestCase<Request> {
+public class DeleteCalendarEventActionRequestTests extends AbstractWireSerializingTestCase<Request> {

     @Override
     protected Request createTestInstance() {
@@ -16,7 +17,7 @@ protected Request createTestInstance() {
     }

     @Override
-    protected Request createBlankInstance() {
-        return new Request();
+    protected Writeable.Reader<Request> instanceReader() {
+        return Request::new;
     }
 }
\ No newline at end of file
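The two index-lifecycle request tests above also carry a `mutateInstance` override, which the test base classes use for equals/hashCode checks: it must return an instance that differs from its input in exactly one randomly chosen property. A sketch of that contract, assuming the usual `ESTestCase` random helpers are in scope; the two-branch switch mirrors RetryRequestTests, while the `Request` constructor is illustrative:

```java
@Override
protected Request mutateInstance(Request instance) throws IOException {
    String[] indices = instance.indices();
    IndicesOptions indicesOptions = instance.indicesOptions();
    switch (between(0, 1)) {
    case 0:
        // change the targeted indices
        indices = randomValueOtherThanMany(i -> Arrays.equals(i, instance.indices()),
            () -> generateRandomStringArray(10, 10, false, true));
        break;
    case 1:
        // change the wildcard-expansion options
        indicesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(),
            randomBoolean(), randomBoolean());
        break;
    default:
        throw new AssertionError("Illegal randomisation branch");
    }
    Request newRequest = new Request(indices);
    newRequest.indicesOptions(indicesOptions);
    return newRequest;
}
```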
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedRequestTests.java
index 47cf3dc5eec5c..b7a0e9ceb53fa 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedRequestTests.java
@@ -5,10 +5,11 @@
  */
 package org.elasticsearch.xpack.core.ml.action;

-import org.elasticsearch.test.AbstractStreamableTestCase;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
 import org.elasticsearch.xpack.core.ml.action.DeleteDatafeedAction.Request;

-public class DeleteDatafeedRequestTests extends AbstractStreamableTestCase<Request> {
+public class DeleteDatafeedRequestTests extends AbstractWireSerializingTestCase<Request> {

     @Override
     protected Request createTestInstance() {
@@ -16,7 +17,7 @@ protected Request createTestInstance() {
     }

     @Override
-    protected Request createBlankInstance() {
-        return new Request();
+    protected Writeable.Reader<Request> instanceReader() {
+        return Request::new;
     }
 }
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/DeleteExpiredDataActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/DeleteExpiredDataActionResponseTests.java
index 34a03fb2e407f..c458896dbfe1e 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/DeleteExpiredDataActionResponseTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/DeleteExpiredDataActionResponseTests.java
@@ -5,10 +5,11 @@
  */
 package org.elasticsearch.xpack.core.ml.action;

-import org.elasticsearch.test.AbstractStreamableTestCase;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
 import org.elasticsearch.xpack.core.ml.action.DeleteExpiredDataAction.Response;

-public class DeleteExpiredDataActionResponseTests extends AbstractStreamableTestCase<Response> {
+public class DeleteExpiredDataActionResponseTests extends AbstractWireSerializingTestCase<Response> {

     @Override
     protected Response createTestInstance() {
@@ -16,7 +17,7 @@ protected Response createTestInstance() {
     }

     @Override
-    protected Response createBlankInstance() {
-        return new Response();
+    protected Writeable.Reader<Response> instanceReader() {
+        return Response::new;
     }
 }
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/DeleteJobRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/DeleteJobRequestTests.java
index 2482e9b562526..9aa492c04515f 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/DeleteJobRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/DeleteJobRequestTests.java
@@ -5,10 +5,10 @@
  */
 package org.elasticsearch.xpack.core.ml.action;

-import org.elasticsearch.test.AbstractStreamableTestCase;
-import org.elasticsearch.xpack.core.ml.action.DeleteJobAction;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;

-public class DeleteJobRequestTests extends AbstractStreamableTestCase<DeleteJobAction.Request> {
+public class DeleteJobRequestTests extends AbstractWireSerializingTestCase<DeleteJobAction.Request> {

     @Override
     protected DeleteJobAction.Request createTestInstance() {
@@ -18,7 +18,7 @@ protected DeleteJobAction.Request createTestInstance() {
     }

     @Override
-    protected DeleteJobAction.Request createBlankInstance() {
-        return new DeleteJobAction.Request();
+    protected Writeable.Reader<DeleteJobAction.Request> instanceReader() {
+        return DeleteJobAction.Request::new;
     }
-}
\ No newline at end of file
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/EvaluateDataFrameActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/EvaluateDataFrameActionRequestTests.java
index e899b7e6642da..e93eb9b20132e 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/EvaluateDataFrameActionRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/EvaluateDataFrameActionRequestTests.java
@@ -6,9 +6,10 @@
 package org.elasticsearch.xpack.core.ml.action;

 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.test.AbstractStreamableXContentTestCase;
+import org.elasticsearch.test.AbstractSerializingTestCase;
 import org.elasticsearch.xpack.core.ml.action.EvaluateDataFrameAction.Request;
 import org.elasticsearch.xpack.core.ml.dataframe.evaluation.MlEvaluationNamedXContentProvider;
 import org.elasticsearch.xpack.core.ml.dataframe.evaluation.softclassification.BinarySoftClassificationTests;
@@ -16,7 +17,7 @@
 import java.util.ArrayList;
 import java.util.List;

-public class EvaluateDataFrameActionRequestTests extends AbstractStreamableXContentTestCase<Request> {
+public class EvaluateDataFrameActionRequestTests extends AbstractSerializingTestCase<Request> {

     @Override
     protected NamedWriteableRegistry getNamedWriteableRegistry() {
@@ -42,13 +43,13 @@ protected Request createTestInstance() {
     }

     @Override
-    protected boolean supportsUnknownFields() {
-        return false;
+    protected Writeable.Reader<Request> instanceReader() {
+        return Request::new;
     }

     @Override
-    protected Request createBlankInstance() {
-        return new Request();
+    protected boolean supportsUnknownFields() {
+        return false;
     }

     @Override
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureActionRequestTests.java
index e72b054015885..557a044d27b21 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureActionRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureActionRequestTests.java
@@ -7,7 +7,8 @@
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.common.bytes.BytesArray;
-import org.elasticsearch.test.AbstractStreamableTestCase;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
 import org.elasticsearch.xpack.core.ml.filestructurefinder.FileStructure;

 import java.util.Arrays;
@@ -15,7 +16,7 @@
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.startsWith;

-public class FindFileStructureActionRequestTests extends AbstractStreamableTestCase<FindFileStructureAction.Request> {
+public class FindFileStructureActionRequestTests extends AbstractWireSerializingTestCase<FindFileStructureAction.Request> {

     @Override
     protected FindFileStructureAction.Request createTestInstance() {
@@ -73,8 +74,8 @@ protected FindFileStructureAction.Request createTestInstance() {
     }

     @Override
-    protected FindFileStructureAction.Request createBlankInstance() {
-        return new FindFileStructureAction.Request();
+    protected Writeable.Reader<FindFileStructureAction.Request> instanceReader() {
+        return FindFileStructureAction.Request::new;
     }

     public void testValidateLinesToSample() {
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureActionResponseTests.java
index 706ee44a4fd97..0c8970fd35b4b 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureActionResponseTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureActionResponseTests.java
@@ -5,10 +5,11 @@
  */
 package org.elasticsearch.xpack.core.ml.action;

-import org.elasticsearch.test.AbstractStreamableTestCase;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
 import org.elasticsearch.xpack.core.ml.filestructurefinder.FileStructureTests;

-public class FindFileStructureActionResponseTests extends AbstractStreamableTestCase<FindFileStructureAction.Response> {
+public class FindFileStructureActionResponseTests extends AbstractWireSerializingTestCase<FindFileStructureAction.Response> {

     @Override
     protected FindFileStructureAction.Response createTestInstance() {
@@ -16,7 +17,7 @@ protected FindFileStructureAction.Response createTestInstance() {
     }

     @Override
-    protected FindFileStructureAction.Response createBlankInstance() {
-        return new FindFileStructureAction.Response();
+    protected Writeable.Reader<FindFileStructureAction.Response> instanceReader() {
+        return FindFileStructureAction.Response::new;
     }
 }
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetBucketActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetBucketActionRequestTests.java
index 1f65852a1ffb2..9349315a29f72 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetBucketActionRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetBucketActionRequestTests.java
@@ -5,12 +5,13 @@
  */
 package org.elasticsearch.xpack.core.ml.action;

+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.test.AbstractStreamableXContentTestCase;
-import org.elasticsearch.xpack.core.ml.action.GetBucketsAction.Request;
+import org.elasticsearch.test.AbstractSerializingTestCase;
 import org.elasticsearch.xpack.core.action.util.PageParams;
+import org.elasticsearch.xpack.core.ml.action.GetBucketsAction.Request;

-public class GetBucketActionRequestTests extends AbstractStreamableXContentTestCase<Request> {
+public class GetBucketActionRequestTests extends AbstractSerializingTestCase<Request> {

     @Override
     protected Request createTestInstance() {
@@ -51,13 +52,13 @@ protected Request createTestInstance() {
     }

     @Override
-    protected boolean supportsUnknownFields() {
-        return false;
+    protected Writeable.Reader<Request> instanceReader() {
+        return Request::new;
     }

     @Override
-    protected Request createBlankInstance() {
-        return new GetBucketsAction.Request();
+    protected boolean supportsUnknownFields() {
+        return false;
     }

     @Override
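Requests that also have a REST/XContent representation, like GetBuckets above and most of the ML requests below, move to `AbstractSerializingTestCase` instead, which layers an XContent round-trip on top of the wire round-trip. The full set of overrides such a test ends up with, sketched here with a hypothetical `Request` and parse hook rather than any one class from this PR:

```java
public class MyRequestTests extends AbstractSerializingTestCase<Request> {

    @Override
    protected Request createTestInstance() {
        return new Request(randomAlphaOfLength(10));
    }

    // wire round-trip: serialize, then rebuild through the reader
    @Override
    protected Writeable.Reader<Request> instanceReader() {
        return Request::new;
    }

    // XContent round-trip: render the instance, then re-parse it
    @Override
    protected Request doParseInstance(XContentParser parser) {
        return Request.parseRequest(null, parser); // hypothetical parse hook
    }

    // these parsers are strict, so random unknown fields must not be injected
    @Override
    protected boolean supportsUnknownFields() {
        return false;
    }
}
```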
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetBucketActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetBucketActionResponseTests.java
index f55aca6b368a7..b9b1af32432c3 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetBucketActionResponseTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetBucketActionResponseTests.java
@@ -5,7 +5,8 @@
  */
 package org.elasticsearch.xpack.core.ml.action;

-import org.elasticsearch.test.AbstractStreamableTestCase;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
 import org.elasticsearch.xpack.core.ml.action.GetBucketsAction.Response;
 import org.elasticsearch.xpack.core.action.util.QueryPage;
 import org.elasticsearch.xpack.core.ml.job.results.AnomalyRecord;
@@ -17,7 +18,7 @@
 import java.util.Date;
 import java.util.List;

-public class GetBucketActionResponseTests extends AbstractStreamableTestCase<Response> {
+public class GetBucketActionResponseTests extends AbstractWireSerializingTestCase<Response> {

     @Override
     protected Response createTestInstance() {
@@ -75,8 +76,7 @@ protected Response createTestInstance() {
     }

     @Override
-    protected Response createBlankInstance() {
-        return new GetBucketsAction.Response();
+    protected Writeable.Reader<Response> instanceReader() {
+        return Response::new;
     }
-
 }
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetCalendarEventsActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetCalendarEventsActionRequestTests.java
index c6dee20dcfeea..fc9d2e6b80c9b 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetCalendarEventsActionRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetCalendarEventsActionRequestTests.java
@@ -6,12 +6,12 @@
 package org.elasticsearch.xpack.core.ml.action;

 import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.test.AbstractStreamableXContentTestCase;
-import org.elasticsearch.xpack.core.ml.action.GetCalendarEventsAction;
+import org.elasticsearch.test.AbstractSerializingTestCase;
 import org.elasticsearch.xpack.core.action.util.PageParams;

-public class GetCalendarEventsActionRequestTests extends AbstractStreamableXContentTestCase<GetCalendarEventsAction.Request> {
+public class GetCalendarEventsActionRequestTests extends AbstractSerializingTestCase<GetCalendarEventsAction.Request> {

     @Override
     protected GetCalendarEventsAction.Request createTestInstance() {
@@ -33,8 +33,8 @@ protected GetCalendarEventsAction.Request createTestInstance() {
     }

     @Override
-    protected GetCalendarEventsAction.Request createBlankInstance() {
-        return new GetCalendarEventsAction.Request();
+    protected Writeable.Reader<GetCalendarEventsAction.Request> instanceReader() {
+        return GetCalendarEventsAction.Request::new;
     }

     @Override
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetCalendarsActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetCalendarsActionRequestTests.java
index d01599e340fad..7d7ad2f73344e 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetCalendarsActionRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetCalendarsActionRequestTests.java
@@ -5,12 +5,12 @@
  */
 package org.elasticsearch.xpack.core.ml.action;

+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.test.AbstractStreamableXContentTestCase;
-import org.elasticsearch.xpack.core.ml.action.GetCalendarsAction;
+import org.elasticsearch.test.AbstractSerializingTestCase;
 import org.elasticsearch.xpack.core.action.util.PageParams;

-public class GetCalendarsActionRequestTests extends AbstractStreamableXContentTestCase<GetCalendarsAction.Request> {
+public class GetCalendarsActionRequestTests extends AbstractSerializingTestCase<GetCalendarsAction.Request> {

     @Override
     protected GetCalendarsAction.Request createTestInstance() {
@@ -25,8 +25,8 @@ protected GetCalendarsAction.Request createTestInstance() {
     }

     @Override
-    protected GetCalendarsAction.Request createBlankInstance() {
-        return new GetCalendarsAction.Request();
+    protected Writeable.Reader<GetCalendarsAction.Request> instanceReader() {
+        return GetCalendarsAction.Request::new;
     }

     @Override
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetCategoriesRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetCategoriesRequestTests.java
index 2cc7738d76172..b3d93ed5c9fbf 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetCategoriesRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetCategoriesRequestTests.java
@@ -5,11 +5,12 @@
  */
 package org.elasticsearch.xpack.core.ml.action;

+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.test.AbstractStreamableXContentTestCase;
+import org.elasticsearch.test.AbstractSerializingTestCase;
 import org.elasticsearch.xpack.core.action.util.PageParams;

-public class GetCategoriesRequestTests extends AbstractStreamableXContentTestCase<GetCategoriesAction.Request> {
+public class GetCategoriesRequestTests extends AbstractSerializingTestCase<GetCategoriesAction.Request> {

     @Override
     protected GetCategoriesAction.Request createTestInstance() {
@@ -26,13 +27,13 @@ protected GetCategoriesAction.Request createTestInstance() {
     }

     @Override
-    protected boolean supportsUnknownFields() {
-        return false;
+    protected Writeable.Reader<GetCategoriesAction.Request> instanceReader() {
+        return GetCategoriesAction.Request::new;
     }

     @Override
-    protected GetCategoriesAction.Request createBlankInstance() {
-        return new GetCategoriesAction.Request();
+    protected boolean supportsUnknownFields() {
+        return false;
     }

     @Override
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetCategoriesResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetCategoriesResponseTests.java
index 05cddd8097a4d..0dccc9cde3eb3 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetCategoriesResponseTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetCategoriesResponseTests.java
@@ -5,13 +5,14 @@
  */
 package org.elasticsearch.xpack.core.ml.action;

-import org.elasticsearch.test.AbstractStreamableTestCase;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
 import org.elasticsearch.xpack.core.action.util.QueryPage;
 import org.elasticsearch.xpack.core.ml.job.results.CategoryDefinition;

 import java.util.Collections;

-public class GetCategoriesResponseTests extends AbstractStreamableTestCase<GetCategoriesAction.Response> {
+public class GetCategoriesResponseTests extends AbstractWireSerializingTestCase<GetCategoriesAction.Response> {

     @Override
     protected GetCategoriesAction.Response createTestInstance() {
@@ -22,7 +23,7 @@ protected GetCategoriesAction.Response createTestInstance() {
     }

     @Override
-    protected GetCategoriesAction.Response createBlankInstance() {
-        return new GetCategoriesAction.Response();
+    protected Writeable.Reader<GetCategoriesAction.Response> instanceReader() {
+        return GetCategoriesAction.Response::new;
     }
 }
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDataFrameAnalyticsActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDataFrameAnalyticsActionResponseTests.java
index 8a7b6717abd92..a82c3e6b957a2 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDataFrameAnalyticsActionResponseTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDataFrameAnalyticsActionResponseTests.java
@@ -6,10 +6,11 @@
 package org.elasticsearch.xpack.core.ml.action;

 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.search.SearchModule;
-import org.elasticsearch.test.AbstractStreamableTestCase;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
 import org.elasticsearch.xpack.core.action.util.QueryPage;
 import org.elasticsearch.xpack.core.ml.action.GetDataFrameAnalyticsAction.Response;
 import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig;
@@ -20,7 +21,7 @@
 import java.util.Collections;
 import java.util.List;

-public class GetDataFrameAnalyticsActionResponseTests extends AbstractStreamableTestCase<Response> {
+public class GetDataFrameAnalyticsActionResponseTests extends AbstractWireSerializingTestCase<Response> {

     @Override
     protected NamedWriteableRegistry getNamedWriteableRegistry() {
@@ -49,7 +50,7 @@ protected Response createTestInstance() {
     }

     @Override
-    protected Response createBlankInstance() {
-        return new Response();
+    protected Writeable.Reader<Response> instanceReader() {
+        return Response::new;
     }
 }
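GetLifecycleResponseTests and GetDataFrameAnalyticsActionResponseTests keep a `getNamedWriteableRegistry()` override through the migration because their payloads contain polymorphic parts (lifecycle actions, or query builders inside a `DataFrameAnalyticsConfig`) that the reader must resolve by name at deserialization time. Roughly what such an override does, mirroring the `SearchModule` usage visible elsewhere in this diff; the exact entry list varies per test:

```java
@Override
protected NamedWriteableRegistry getNamedWriteableRegistry() {
    List<NamedWriteableRegistry.Entry> entries = new ArrayList<>(
        new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedWriteables());
    return new NamedWriteableRegistry(entries);
}
```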
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedStatsActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedStatsActionResponseTests.java
index 0fbd8a3386eb9..74bc71074260e 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedStatsActionResponseTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedStatsActionResponseTests.java
@@ -8,12 +8,13 @@
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.transport.TransportAddress;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentType;
-import org.elasticsearch.test.AbstractStreamableTestCase;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
 import org.elasticsearch.xpack.core.action.util.QueryPage;
 import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction.Response;
 import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;
@@ -34,7 +35,7 @@
 import static org.hamcrest.Matchers.hasKey;
 import static org.hamcrest.Matchers.is;

-public class GetDatafeedStatsActionResponseTests extends AbstractStreamableTestCase<Response> {
+public class GetDatafeedStatsActionResponseTests extends AbstractWireSerializingTestCase<Response> {

     @Override
     protected Response createTestInstance() {
@@ -61,8 +62,8 @@ protected Response createTestInstance() {
     }

     @Override
-    protected Response createBlankInstance() {
-        return new Response();
+    protected Writeable.Reader<Response> instanceReader() {
+        return Response::new;
     }

     @SuppressWarnings("unchecked")
@@ -77,7 +78,7 @@ public void testDatafeedStatsToXContent() throws IOException {
             Set.of(),
             Version.CURRENT);

-        DatafeedTimingStats timingStats = new DatafeedTimingStats("my-job-id", 5, 123.456);
+        DatafeedTimingStats timingStats = new DatafeedTimingStats("my-job-id", 5, 10, 100.0);
         Response.DatafeedStats stats = new Response.DatafeedStats("df-id", DatafeedState.STARTED, node, null, timingStats);
@@ -109,9 +110,11 @@ public void testDatafeedStatsToXContent() throws IOException {
         assertThat(nodeAttributes, hasEntry("ml.max_open_jobs", "5"));

         Map<String, Object> timingStatsMap = (Map<String, Object>) dfStatsMap.get("timing_stats");
-        assertThat(timingStatsMap.size(), is(equalTo(3)));
+        assertThat(timingStatsMap.size(), is(equalTo(5)));
         assertThat(timingStatsMap, hasEntry("job_id", "my-job-id"));
         assertThat(timingStatsMap, hasEntry("search_count", 5));
-        assertThat(timingStatsMap, hasEntry("total_search_time_ms", 123.456));
+        assertThat(timingStatsMap, hasEntry("bucket_count", 10));
+        assertThat(timingStatsMap, hasEntry("total_search_time_ms", 100.0));
+        assertThat(timingStatsMap, hasEntry("average_search_time_per_bucket_ms", 10.0));
     }
 }
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsActionResponseTests.java
index 33bc6228b1293..645ea73320389 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsActionResponseTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsActionResponseTests.java
@@ -6,9 +6,10 @@
 package org.elasticsearch.xpack.core.ml.action;

 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.search.SearchModule;
-import org.elasticsearch.test.AbstractStreamableTestCase;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
 import org.elasticsearch.xpack.core.ml.action.GetDatafeedsAction.Response;
 import org.elasticsearch.xpack.core.action.util.QueryPage;
 import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;
@@ -18,7 +19,7 @@
 import java.util.Collections;
 import java.util.List;

-public class GetDatafeedsActionResponseTests extends AbstractStreamableTestCase<Response> {
+public class GetDatafeedsActionResponseTests extends AbstractWireSerializingTestCase<Response> {

     @Override
     protected Response createTestInstance() {
@@ -31,8 +32,8 @@ protected Response createTestInstance() {
     }

     @Override
-    protected Response createBlankInstance() {
-        return new Response();
+    protected Writeable.Reader<Response> instanceReader() {
+        return Response::new;
     }

     @Override
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetFiltersActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetFiltersActionRequestTests.java
index af80712fb9ff2..a4419a60fca20 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetFiltersActionRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetFiltersActionRequestTests.java
@@ -5,11 +5,12 @@
  */
 package org.elasticsearch.xpack.core.ml.action;

-import org.elasticsearch.test.AbstractStreamableTestCase;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
 import org.elasticsearch.xpack.core.ml.action.GetFiltersAction.Request;
 import org.elasticsearch.xpack.core.action.util.PageParams;

-public class GetFiltersActionRequestTests extends AbstractStreamableTestCase<Request> {
+public class GetFiltersActionRequestTests extends AbstractWireSerializingTestCase<Request> {

     @Override
@@ -28,8 +29,7 @@ protected Request createTestInstance() {
     }

     @Override
-    protected Request createBlankInstance() {
-        return new Request();
+    protected Writeable.Reader<Request> instanceReader() {
+        return Request::new;
     }
-
 }
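The reworked `DatafeedTimingStats` assertions above are internally consistent: the constructor now takes (jobId, searchCount, bucketCount, totalSearchTimeMs), the timing-stats map grows from 3 to 5 entries, and the new derived field is, by all appearances, total search time divided by bucket count:

```java
double totalSearchTimeMs = 100.0;
long bucketCount = 10;
// average_search_time_per_bucket_ms asserted above: 100.0 / 10 = 10.0
double averageSearchTimePerBucketMs = totalSearchTimeMs / bucketCount;
```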
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetFiltersActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetFiltersActionResponseTests.java
index d6df42cd6177e..ba16c5a0afc54 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetFiltersActionResponseTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetFiltersActionResponseTests.java
@@ -5,7 +5,8 @@
  */
 package org.elasticsearch.xpack.core.ml.action;

-import org.elasticsearch.test.AbstractStreamableTestCase;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
 import org.elasticsearch.xpack.core.ml.action.GetFiltersAction.Response;
 import org.elasticsearch.xpack.core.action.util.QueryPage;
 import org.elasticsearch.xpack.core.ml.job.config.MlFilter;
@@ -13,7 +14,7 @@

 import java.util.Collections;

-public class GetFiltersActionResponseTests extends AbstractStreamableTestCase<Response> {
+public class GetFiltersActionResponseTests extends AbstractWireSerializingTestCase<Response> {

     @Override
     protected Response createTestInstance() {
@@ -24,8 +25,7 @@ protected Response createTestInstance() {
     }

     @Override
-    protected Response createBlankInstance() {
-        return new Response();
+    protected Writeable.Reader<Response> instanceReader() {
+        return Response::new;
     }
-
 }
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetInfluencersActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetInfluencersActionRequestTests.java
index 7f2c94862c303..74e375bd6e797 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetInfluencersActionRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetInfluencersActionRequestTests.java
@@ -5,12 +5,13 @@
  */
 package org.elasticsearch.xpack.core.ml.action;

+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.test.AbstractStreamableXContentTestCase;
-import org.elasticsearch.xpack.core.ml.action.GetInfluencersAction.Request;
+import org.elasticsearch.test.AbstractSerializingTestCase;
 import org.elasticsearch.xpack.core.action.util.PageParams;
+import org.elasticsearch.xpack.core.ml.action.GetInfluencersAction.Request;

-public class GetInfluencersActionRequestTests extends AbstractStreamableXContentTestCase<Request> {
+public class GetInfluencersActionRequestTests extends AbstractSerializingTestCase<Request> {

     @Override
     protected Request doParseInstance(XContentParser parser) {
@@ -49,13 +50,12 @@ protected Request createTestInstance() {
     }

     @Override
-    protected boolean supportsUnknownFields() {
-        return false;
+    protected Writeable.Reader<Request> instanceReader() {
+        return Request::new;
     }

     @Override
-    protected Request createBlankInstance() {
-        return new Request();
+    protected boolean supportsUnknownFields() {
+        return false;
     }
-
 }
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetInfluencersActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetInfluencersActionResponseTests.java
index e01c8935365a8..8de799ecd20b0 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetInfluencersActionResponseTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetInfluencersActionResponseTests.java
@@ -5,16 +5,17 @@
  */
 package org.elasticsearch.xpack.core.ml.action;

-import org.elasticsearch.test.AbstractStreamableTestCase;
-import org.elasticsearch.xpack.core.ml.action.GetInfluencersAction.Response;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
 import org.elasticsearch.xpack.core.action.util.QueryPage;
+import org.elasticsearch.xpack.core.ml.action.GetInfluencersAction.Response;
 import org.elasticsearch.xpack.core.ml.job.results.Influencer;

 import java.util.ArrayList;
 import java.util.Date;
 import java.util.List;

-public class GetInfluencersActionResponseTests extends AbstractStreamableTestCase<Response> {
+public class GetInfluencersActionResponseTests extends AbstractWireSerializingTestCase<Response> {

     @Override
     protected Response createTestInstance() {
@@ -34,8 +35,7 @@ protected Response createTestInstance() {
     }

     @Override
-    protected Response createBlankInstance() {
-        return new Response();
+    protected Writeable.Reader<Response> instanceReader() {
+        return Response::new;
     }
-
 }
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetJobsActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetJobsActionResponseTests.java
index 932386dba8b9d..30f8eb6f13280 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetJobsActionResponseTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetJobsActionResponseTests.java
@@ -5,7 +5,8 @@
  */
 package org.elasticsearch.xpack.core.ml.action;

-import org.elasticsearch.test.AbstractStreamableTestCase;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
 import org.elasticsearch.xpack.core.ml.action.GetJobsAction.Response;
 import org.elasticsearch.xpack.core.action.util.QueryPage;
 import org.elasticsearch.xpack.core.ml.job.config.Job;
@@ -14,7 +15,7 @@
 import java.util.ArrayList;
 import java.util.List;

-public class GetJobsActionResponseTests extends AbstractStreamableTestCase<Response> {
+public class GetJobsActionResponseTests extends AbstractWireSerializingTestCase<Response> {

     @Override
     protected Response createTestInstance() {
@@ -32,8 +33,7 @@ protected Response createTestInstance() {
     }

     @Override
-    protected Response createBlankInstance() {
-        return new Response();
+    protected Writeable.Reader<Response> instanceReader() {
+        return Response::new;
     }
-
 }
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetModelSnapshotsActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetModelSnapshotsActionRequestTests.java
index 54c5fc894b69a..636094acd70c1 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetModelSnapshotsActionRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetModelSnapshotsActionRequestTests.java
@@ -5,16 +5,17 @@
  */
 package org.elasticsearch.xpack.core.ml.action;

+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.test.AbstractStreamableXContentTestCase;
-import org.elasticsearch.xpack.core.ml.action.GetModelSnapshotsAction.Request;
+import org.elasticsearch.test.AbstractSerializingTestCase;
 import org.elasticsearch.xpack.core.action.util.PageParams;
+import org.elasticsearch.xpack.core.ml.action.GetModelSnapshotsAction.Request;

-public class GetModelSnapshotsActionRequestTests extends AbstractStreamableXContentTestCase<Request> {
+public class GetModelSnapshotsActionRequestTests extends AbstractSerializingTestCase<Request> {

     @Override
     protected Request doParseInstance(XContentParser parser) {
-        return GetModelSnapshotsAction.Request.parseRequest(null, null, parser);
+        return Request.parseRequest(null, null, parser);
     }

     @Override
@@ -41,13 +42,12 @@ protected Request createTestInstance() {
     }

     @Override
-    protected boolean supportsUnknownFields() {
-        return false;
+    protected Writeable.Reader<Request> instanceReader() {
+        return Request::new;
     }

     @Override
-    protected Request createBlankInstance() {
-        return new Request();
+    protected boolean supportsUnknownFields() {
+        return false;
     }
-
 }
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetModelSnapshotsActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetModelSnapshotsActionResponseTests.java
index da3eb87ebd597..19a063f6bd3a0 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetModelSnapshotsActionResponseTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetModelSnapshotsActionResponseTests.java
@@ -5,7 +5,8 @@
  */
 package org.elasticsearch.xpack.core.ml.action;

-import org.elasticsearch.test.AbstractStreamableTestCase;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
 import org.elasticsearch.xpack.core.ml.action.GetModelSnapshotsAction.Response;
 import org.elasticsearch.xpack.core.action.util.QueryPage;
 import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot;
@@ -14,7 +15,7 @@
 import java.util.ArrayList;
 import java.util.List;

-public class GetModelSnapshotsActionResponseTests extends AbstractStreamableTestCase<Response> {
+public class GetModelSnapshotsActionResponseTests extends AbstractWireSerializingTestCase<Response> {

     @Override
     protected Response createTestInstance() {
@@ -28,7 +29,7 @@ protected Response createTestInstance() {
     }

     @Override
-    protected Response createBlankInstance() {
-        return new Response();
+    protected Writeable.Reader<Response> instanceReader() {
+        return Response::new;
     }
 }
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetOverallBucketsActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetOverallBucketsActionRequestTests.java
index 9799010204cd6..2be2dcdab581d 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetOverallBucketsActionRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetOverallBucketsActionRequestTests.java
@@ -5,12 +5,13 @@
  */
 package org.elasticsearch.xpack.core.ml.action;

+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.test.AbstractStreamableXContentTestCase;
+import org.elasticsearch.test.AbstractSerializingTestCase;
 import org.elasticsearch.xpack.core.ml.action.GetOverallBucketsAction.Request;

-public class GetOverallBucketsActionRequestTests extends AbstractStreamableXContentTestCase<Request> {
+public class GetOverallBucketsActionRequestTests extends AbstractSerializingTestCase<Request> {

     @Override
     protected Request createTestInstance() {
@@ -39,13 +40,13 @@ protected Request createTestInstance() {
     }

     @Override
-    protected boolean supportsUnknownFields() {
-        return false;
+    protected Writeable.Reader<Request> instanceReader() {
+        return Request::new;
     }

     @Override
-    protected Request createBlankInstance() {
-        return new Request();
+    protected boolean supportsUnknownFields() {
+        return false;
     }

     @Override
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetOverallBucketsActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetOverallBucketsActionResponseTests.java
index ec40eebb502bd..4f3cdc6245eb6 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetOverallBucketsActionResponseTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetOverallBucketsActionResponseTests.java
@@ -5,7 +5,8 @@
  */
 package org.elasticsearch.xpack.core.ml.action;

-import org.elasticsearch.test.AbstractStreamableTestCase;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
 import org.elasticsearch.xpack.core.ml.action.GetOverallBucketsAction.Response;
 import org.elasticsearch.xpack.core.action.util.QueryPage;
 import org.elasticsearch.xpack.core.ml.job.results.OverallBucket;
@@ -14,7 +15,7 @@
 import java.util.Date;
 import java.util.List;

-public class GetOverallBucketsActionResponseTests extends AbstractStreamableTestCase<Response> {
+public class GetOverallBucketsActionResponseTests extends AbstractWireSerializingTestCase<Response> {

     @Override
     protected Response createTestInstance() {
@@ -34,7 +35,7 @@ protected Response createTestInstance() {
     }

     @Override
-    protected Response createBlankInstance() {
-        return new Response();
+    protected Writeable.Reader<Response> instanceReader() {
+        return Response::new;
     }
 }
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetRecordsActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetRecordsActionRequestTests.java
index 982f6e17055d2..3936b88b9449b 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetRecordsActionRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetRecordsActionRequestTests.java
@@ -5,12 +5,13 @@
  */
 package org.elasticsearch.xpack.core.ml.action;

+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.test.AbstractStreamableXContentTestCase;
-import org.elasticsearch.xpack.core.ml.action.GetRecordsAction.Request;
+import org.elasticsearch.test.AbstractSerializingTestCase;
 import org.elasticsearch.xpack.core.action.util.PageParams;
+import org.elasticsearch.xpack.core.ml.action.GetRecordsAction.Request;

-public class GetRecordsActionRequestTests extends AbstractStreamableXContentTestCase<Request> {
+public class GetRecordsActionRequestTests extends AbstractSerializingTestCase<Request> {

     @Override
     protected Request doParseInstance(XContentParser parser) {
@@ -49,13 +50,12 @@ protected Request createTestInstance() {
     }

     @Override
-    protected boolean supportsUnknownFields() {
-        return false;
+    protected Writeable.Reader<Request> instanceReader() {
+        return Request::new;
     }

     @Override
-    protected Request createBlankInstance() {
-        return new Request();
+    protected boolean supportsUnknownFields() {
+        return false;
     }
-
 }
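All of these conversions lean on the same round-trip machinery in the test framework: serialize the instance, read it back through `instanceReader()`, and assert equality. A rough sketch of that copy step, as an illustration of the mechanism rather than the framework's exact code:

```java
private static <T extends Writeable> T copyViaWire(T original, Writeable.Reader<T> reader) throws IOException {
    try (BytesStreamOutput output = new BytesStreamOutput()) {
        original.writeTo(output);                         // serialize
        try (StreamInput in = output.bytes().streamInput()) {
            return reader.read(in);                       // rebuild via the reader
        }
    }
}
```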
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetRecordsActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetRecordsActionResponseTests.java
index 1620820cba4b2..d74d8ef047283 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetRecordsActionResponseTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetRecordsActionResponseTests.java
@@ -5,7 +5,8 @@
  */
 package org.elasticsearch.xpack.core.ml.action;

-import org.elasticsearch.test.AbstractStreamableTestCase;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
 import org.elasticsearch.xpack.core.ml.action.GetRecordsAction.Response;
 import org.elasticsearch.xpack.core.action.util.QueryPage;
 import org.elasticsearch.xpack.core.ml.job.results.AnomalyRecord;
@@ -14,7 +15,7 @@
 import java.util.Date;
 import java.util.List;

-public class GetRecordsActionResponseTests extends AbstractStreamableTestCase<Response> {
+public class GetRecordsActionResponseTests extends AbstractWireSerializingTestCase<Response> {

     @Override
     protected Response createTestInstance() {
@@ -30,8 +31,7 @@ protected Response createTestInstance() {
     }

     @Override
-    protected Response createBlankInstance() {
-        return new Response();
+    protected Writeable.Reader<Response> instanceReader() {
+        return Response::new;
     }
-
 }
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/OpenJobActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/OpenJobActionRequestTests.java
index da1ce5fdd0fa0..008757d3f45ef 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/OpenJobActionRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/OpenJobActionRequestTests.java
@@ -5,11 +5,12 @@
  */
 package org.elasticsearch.xpack.core.ml.action;

+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.test.AbstractStreamableXContentTestCase;
+import org.elasticsearch.test.AbstractSerializingTestCase;
 import org.elasticsearch.xpack.core.ml.action.OpenJobAction.Request;

-public class OpenJobActionRequestTests extends AbstractStreamableXContentTestCase<Request> {
+public class OpenJobActionRequestTests extends AbstractSerializingTestCase<Request> {

     @Override
     protected Request createTestInstance() {
@@ -17,13 +18,13 @@ protected Request createTestInstance() {
     }

     @Override
-    protected boolean supportsUnknownFields() {
-        return false;
+    protected Writeable.Reader<Request> instanceReader() {
+        return Request::new;
     }

     @Override
-    protected Request createBlankInstance() {
-        return new Request();
+    protected boolean supportsUnknownFields() {
+        return false;
     }

     @Override
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PostCalendarEventActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PostCalendarEventActionRequestTests.java
index af94c180a1f78..cd984cc2549cf 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PostCalendarEventActionRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PostCalendarEventActionRequestTests.java
@@ -7,9 +7,10 @@
 import org.elasticsearch.ElasticsearchStatusException;
 import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
-import org.elasticsearch.test.AbstractStreamableTestCase;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
 import org.elasticsearch.xpack.core.ml.calendars.ScheduledEvent;
 import org.elasticsearch.xpack.core.ml.calendars.ScheduledEventTests;
@@ -17,7 +18,7 @@
 import java.util.ArrayList;
 import java.util.List;

-public class PostCalendarEventActionRequestTests extends AbstractStreamableTestCase<PostCalendarEventsAction.Request> {
+public class PostCalendarEventActionRequestTests extends AbstractWireSerializingTestCase<PostCalendarEventsAction.Request> {

     @Override
     protected PostCalendarEventsAction.Request createTestInstance() {
@@ -25,6 +26,11 @@ protected PostCalendarEventsAction.Request createTestInstance() {
         return createTestInstance(id);
     }

+    @Override
+    protected Writeable.Reader<PostCalendarEventsAction.Request> instanceReader() {
+        return PostCalendarEventsAction.Request::new;
+    }
+
     private PostCalendarEventsAction.Request createTestInstance(String calendarId) {
         int numEvents = randomIntBetween(1, 10);
         List<ScheduledEvent> events = new ArrayList<>();
@@ -36,12 +42,6 @@ private PostCalendarEventsAction.Request createTestInstance(String calendarId) {
         return request;
     }

-    @Override
-    protected PostCalendarEventsAction.Request createBlankInstance() {
-        return new PostCalendarEventsAction.Request();
-    }
-
-
     public void testParseRequest() throws IOException {
         PostCalendarEventsAction.Request sourceRequest = createTestInstance();
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedActionRequestTests.java
index b7828e4b97682..d60420e4834fc 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedActionRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedActionRequestTests.java
@@ -5,10 +5,11 @@
  */
 package org.elasticsearch.xpack.core.ml.action;

-import org.elasticsearch.test.AbstractStreamableTestCase;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
 import org.elasticsearch.xpack.core.ml.action.PreviewDatafeedAction.Request;

-public class PreviewDatafeedActionRequestTests extends AbstractStreamableTestCase<Request> {
+public class PreviewDatafeedActionRequestTests extends AbstractWireSerializingTestCase<Request> {

     @Override
     protected Request createTestInstance() {
@@ -16,7 +17,7 @@ protected Request createTestInstance() {
     }

     @Override
-    protected Request createBlankInstance() {
-        return new Request();
+    protected Writeable.Reader<Request> instanceReader() {
+        return Request::new;
     }
 }
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutCalendarActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutCalendarActionRequestTests.java
index 7d111a31c9d51..32c54d57e97e9 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutCalendarActionRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutCalendarActionRequestTests.java
@@ -5,12 +5,13 @@
  */
 package org.elasticsearch.xpack.core.ml.action;

+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.test.AbstractStreamableXContentTestCase;
+import org.elasticsearch.test.AbstractSerializingTestCase;
 import org.elasticsearch.xpack.core.ml.calendars.CalendarTests;
 import org.elasticsearch.xpack.core.ml.job.config.JobTests;

-public class PutCalendarActionRequestTests extends AbstractStreamableXContentTestCase<PutCalendarAction.Request> {
+public class PutCalendarActionRequestTests extends AbstractSerializingTestCase<PutCalendarAction.Request> {

     private final String calendarId = JobTests.randomValidJobId();
@@ -20,13 +21,13 @@ protected PutCalendarAction.Request createTestInstance() {
     }

     @Override
-    protected boolean supportsUnknownFields() {
-        return false;
+    protected Writeable.Reader<PutCalendarAction.Request> instanceReader() {
+        return PutCalendarAction.Request::new;
     }

     @Override
-    protected PutCalendarAction.Request createBlankInstance() {
-        return new PutCalendarAction.Request();
+    protected boolean supportsUnknownFields() {
+        return false;
     }

     @Override
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutDataFrameAnalyticsActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutDataFrameAnalyticsActionRequestTests.java
index 1e5416d5a5dce..dbd3db927503c 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutDataFrameAnalyticsActionRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutDataFrameAnalyticsActionRequestTests.java
@@ -6,11 +6,12 @@
 package org.elasticsearch.xpack.core.ml.action;

 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.search.SearchModule;
-import org.elasticsearch.test.AbstractStreamableXContentTestCase;
+import org.elasticsearch.test.AbstractSerializingTestCase;
 import org.elasticsearch.xpack.core.ml.action.PutDataFrameAnalyticsAction.Request;
 import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfigTests;
 import org.elasticsearch.xpack.core.ml.dataframe.analyses.MlDataFrameAnalysisNamedXContentProvider;
@@ -20,7 +21,7 @@
 import java.util.Collections;
 import java.util.List;

-public class PutDataFrameAnalyticsActionRequestTests extends AbstractStreamableXContentTestCase<Request> {
+public class PutDataFrameAnalyticsActionRequestTests extends AbstractSerializingTestCase<Request> {

     private String id;
@@ -51,13 +52,13 @@ protected Request createTestInstance() {
     }

     @Override
-    protected boolean supportsUnknownFields() {
-        return false;
+    protected Writeable.Reader<Request> instanceReader() {
+        return Request::new;
     }

     @Override
-    protected Request createBlankInstance() {
-        return new Request();
+    protected boolean supportsUnknownFields() {
+        return false;
     }

     @Override
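The analytics request test above keeps both registries because parsing a request needs the named XContent entries for analyses and queries; its Response counterpart (next diff) drops the XContent registry entirely, since a wire-serializing test never parses XContent. The override the request test retains looks roughly like the code being deleted from the response test below:

```java
@Override
protected NamedXContentRegistry xContentRegistry() {
    List<NamedXContentRegistry.Entry> namedXContent = new ArrayList<>();
    namedXContent.addAll(new MlDataFrameAnalysisNamedXContentProvider().getNamedXContentParsers());
    namedXContent.addAll(new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedXContents());
    return new NamedXContentRegistry(namedXContent);
}
```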
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutDataFrameAnalyticsActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutDataFrameAnalyticsActionResponseTests.java
index d323505828e42..6338e031070b5 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutDataFrameAnalyticsActionResponseTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutDataFrameAnalyticsActionResponseTests.java
@@ -6,10 +6,10 @@
 package org.elasticsearch.xpack.core.ml.action;

 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.search.SearchModule;
-import org.elasticsearch.test.AbstractStreamableTestCase;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
 import org.elasticsearch.xpack.core.ml.action.PutDataFrameAnalyticsAction.Response;
 import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfigTests;
 import org.elasticsearch.xpack.core.ml.dataframe.analyses.MlDataFrameAnalysisNamedXContentProvider;
@@ -18,7 +18,7 @@
 import java.util.Collections;
 import java.util.List;

-public class PutDataFrameAnalyticsActionResponseTests extends AbstractStreamableTestCase<Response> {
+public class PutDataFrameAnalyticsActionResponseTests extends AbstractWireSerializingTestCase<Response> {

     @Override
     protected NamedWriteableRegistry getNamedWriteableRegistry() {
@@ -28,21 +28,13 @@ protected NamedWriteableRegistry getNamedWriteableRegistry() {
         return new NamedWriteableRegistry(namedWriteables);
     }

-    @Override
-    protected NamedXContentRegistry xContentRegistry() {
-        List<NamedXContentRegistry.Entry> namedXContent = new ArrayList<>();
-        namedXContent.addAll(new MlDataFrameAnalysisNamedXContentProvider().getNamedXContentParsers());
-        namedXContent.addAll(new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedXContents());
-        return new NamedXContentRegistry(namedXContent);
-    }
-
     @Override
     protected Response createTestInstance() {
         return new Response(DataFrameAnalyticsConfigTests.createRandom(DataFrameAnalyticsConfigTests.randomValidId()));
     }

     @Override
-    protected Response createBlankInstance() {
-        return new Response();
+    protected Writeable.Reader<Response> instanceReader() {
+        return Response::new;
     }
 }
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedActionRequestTests.java
index 8ba2f1bb161ea..2de9f9ec23165 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedActionRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedActionRequestTests.java
@@ -6,11 +6,12 @@
 package org.elasticsearch.xpack.core.ml.action;

 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.search.SearchModule;
-import org.elasticsearch.test.AbstractStreamableXContentTestCase;
+import org.elasticsearch.test.AbstractSerializingTestCase;
 import org.elasticsearch.xpack.core.ml.action.PutDatafeedAction.Request;
 import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;
 import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfigTests;
@@ -18,7 +19,7 @@

 import java.util.Collections;

-public class PutDatafeedActionRequestTests extends AbstractStreamableXContentTestCase<Request> {
+public class PutDatafeedActionRequestTests extends AbstractSerializingTestCase<Request> {

     private String datafeedId;
@@ -35,13 +36,13 @@ protected Request createTestInstance() {
     }

     @Override
-    protected boolean supportsUnknownFields() {
-        return false;
+    protected Writeable.Reader<Request> instanceReader() {
+        return Request::new;
     }

     @Override
-    protected Request createBlankInstance() {
-        return new Request();
+    protected boolean supportsUnknownFields() {
+        return false;
     }

     @Override
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedActionResponseTests.java
index 619770e8990ab..f61c0dfdac89d 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedActionResponseTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedActionResponseTests.java
@@ -6,9 +6,10 @@
 package org.elasticsearch.xpack.core.ml.action;

 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.search.SearchModule;
-import org.elasticsearch.test.AbstractStreamableTestCase;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
 import org.elasticsearch.xpack.core.ml.action.PutDatafeedAction.Response;
 import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;
 import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfigTests;
@@ -16,7 +17,7 @@
 import java.util.Arrays;
 import java.util.Collections;

-public class PutDatafeedActionResponseTests extends AbstractStreamableTestCase<Response> {
+public class PutDatafeedActionResponseTests extends AbstractWireSerializingTestCase<Response> {

     @Override
     protected Response createTestInstance() {
@@ -27,8 +28,8 @@ protected Response createTestInstance() {
     }

     @Override
-    protected Response createBlankInstance() {
-        return new Response();
+    protected Writeable.Reader<Response> instanceReader() {
+        return Response::new;
     }

     @Override
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutFilterActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutFilterActionRequestTests.java
index bed0ab775af12..37ec18c45e770 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutFilterActionRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutFilterActionRequestTests.java
@@ -5,12 +5,13 @@
  */
 package org.elasticsearch.xpack.core.ml.action;

+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.test.AbstractStreamableXContentTestCase;
+import org.elasticsearch.test.AbstractSerializingTestCase;
 import org.elasticsearch.xpack.core.ml.action.PutFilterAction.Request;
 import org.elasticsearch.xpack.core.ml.job.config.MlFilterTests;

-public class PutFilterActionRequestTests extends AbstractStreamableXContentTestCase<Request> {
+public class PutFilterActionRequestTests extends AbstractSerializingTestCase<Request> {

     private final String filterId = MlFilterTests.randomValidFilterId();
@@ -20,13 +21,13 @@ protected Request createTestInstance() {
     }

     @Override
-    protected boolean supportsUnknownFields() {
-        return false;
+    protected Writeable.Reader<Request> instanceReader() {
+        return Request::new;
     }

     @Override
-    protected Request createBlankInstance() {
-        return new PutFilterAction.Request();
+    protected boolean supportsUnknownFields() {
+        return false;
     }

     @Override
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutFilterActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutFilterActionResponseTests.java
index 1e697f5172a4a..e712602be2d72 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutFilterActionResponseTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutFilterActionResponseTests.java
@@ -5,14 +5,15 @@
  */
 package org.elasticsearch.xpack.core.ml.action;

+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.test.AbstractStreamableXContentTestCase;
+import org.elasticsearch.test.AbstractSerializingTestCase;
 import org.elasticsearch.xpack.core.ml.job.config.MlFilter;
 import org.elasticsearch.xpack.core.ml.job.config.MlFilterTests;

 import java.io.IOException;

-public class PutFilterActionResponseTests extends AbstractStreamableXContentTestCase<PutFilterAction.Response> {
+public class PutFilterActionResponseTests extends AbstractSerializingTestCase<PutFilterAction.Response> {

     @Override
     protected PutFilterAction.Response createTestInstance() {
@@ -20,8 +21,8 @@ protected PutFilterAction.Response createTestInstance() {
     }

     @Override
-    protected PutFilterAction.Response createBlankInstance() {
-        return new PutFilterAction.Response();
+    protected Writeable.Reader<PutFilterAction.Response> instanceReader() {
+        return PutFilterAction.Response::new;
     }

     @Override
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutJobActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutJobActionRequestTests.java
index 46dd97a63a8e1..379d90000db30 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutJobActionRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutJobActionRequestTests.java
@@ -6,10 +6,11 @@
 package org.elasticsearch.xpack.core.ml.action;

 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
-import org.elasticsearch.test.AbstractStreamableXContentTestCase;
+import org.elasticsearch.test.AbstractSerializingTestCase;
 import org.elasticsearch.xpack.core.ml.action.PutJobAction.Request;
 import org.elasticsearch.xpack.core.ml.job.config.Job;
@@ -19,7 +20,7 @@
 import static org.elasticsearch.xpack.core.ml.job.config.JobTests.buildJobBuilder;
 import static org.elasticsearch.xpack.core.ml.job.config.JobTests.randomValidJobId;

-public class PutJobActionRequestTests extends AbstractStreamableXContentTestCase<Request> {
+public class PutJobActionRequestTests extends AbstractSerializingTestCase<Request> {

     private final String jobId = randomValidJobId();
@@ -30,8 +31,8 @@ protected Request createTestInstance() {
     }

     @Override
-    protected Request createBlankInstance() {
-        return new Request();
+    protected Writeable.Reader<Request> instanceReader() {
+        return Request::new;
     }

     @Override
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutJobActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutJobActionResponseTests.java
index 090ae23f2cbb0..9abca64c70924 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutJobActionResponseTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutJobActionResponseTests.java
@@ -5,14 +5,15 @@
  */
 package org.elasticsearch.xpack.core.ml.action;

-import org.elasticsearch.test.AbstractStreamableTestCase;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
 import org.elasticsearch.xpack.core.ml.action.PutJobAction.Response;
 import org.elasticsearch.xpack.core.ml.job.config.Job;

 import static
org.elasticsearch.xpack.core.ml.job.config.JobTests.buildJobBuilder; import static org.elasticsearch.xpack.core.ml.job.config.JobTests.randomValidJobId; -public class PutJobActionResponseTests extends AbstractStreamableTestCase { +public class PutJobActionResponseTests extends AbstractWireSerializingTestCase { @Override protected Response createTestInstance() { @@ -21,8 +22,7 @@ protected Response createTestInstance() { } @Override - protected Response createBlankInstance() { - return new Response(); + protected Writeable.Reader instanceReader() { + return Response::new; } - } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotActionRequestTests.java index cd1b48cb31aed..46b6999defcd2 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotActionRequestTests.java @@ -5,12 +5,12 @@ */ package org.elasticsearch.xpack.core.ml.action; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; -import org.elasticsearch.xpack.core.ml.action.RevertModelSnapshotAction; +import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.xpack.core.ml.action.RevertModelSnapshotAction.Request; -public class RevertModelSnapshotActionRequestTests extends AbstractStreamableXContentTestCase { +public class RevertModelSnapshotActionRequestTests extends AbstractSerializingTestCase { @Override protected Request createTestInstance() { @@ -23,13 +23,13 @@ protected Request createTestInstance() { } @Override - protected boolean supportsUnknownFields() { - return false; + protected Writeable.Reader instanceReader() { + return Request::new; } @Override - protected Request createBlankInstance() { - return new RevertModelSnapshotAction.Request(); + protected boolean supportsUnknownFields() { + return false; } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotActionResponseTests.java index f24a8ccb9e180..20dc85f8c98f4 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotActionResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotActionResponseTests.java @@ -5,11 +5,12 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xpack.core.ml.action.RevertModelSnapshotAction.Response; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshotTests; -public class RevertModelSnapshotActionResponseTests extends AbstractStreamableTestCase { +public class RevertModelSnapshotActionResponseTests extends AbstractWireSerializingTestCase { @Override protected Response createTestInstance() { @@ -17,8 +18,7 @@ protected Response createTestInstance() { } @Override - protected Response createBlankInstance() { - return new RevertModelSnapshotAction.Response(); + 
protected Writeable.Reader instanceReader() { + return Response::new; } - } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedActionRequestTests.java index fe2bb5d6508e2..3b8eb0254ec73 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedActionRequestTests.java @@ -6,13 +6,14 @@ package org.elasticsearch.xpack.core.ml.action; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction.Request; import static org.hamcrest.Matchers.equalTo; -public class StartDatafeedActionRequestTests extends AbstractStreamableXContentTestCase { +public class StartDatafeedActionRequestTests extends AbstractSerializingTestCase { @Override protected Request createTestInstance() { @@ -20,13 +21,13 @@ protected Request createTestInstance() { } @Override - protected boolean supportsUnknownFields() { - return false; + protected Writeable.Reader instanceReader() { + return Request::new; } @Override - protected Request createBlankInstance() { - return new Request(); + protected boolean supportsUnknownFields() { + return false; } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateCalendarJobActionResquestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateCalendarJobActionResquestTests.java index b11c571bf78b8..3f6264815df45 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateCalendarJobActionResquestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateCalendarJobActionResquestTests.java @@ -5,9 +5,10 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; -public class UpdateCalendarJobActionResquestTests extends AbstractStreamableTestCase { +public class UpdateCalendarJobActionResquestTests extends AbstractWireSerializingTestCase { @Override protected UpdateCalendarJobAction.Request createTestInstance() { @@ -17,7 +18,7 @@ protected UpdateCalendarJobAction.Request createTestInstance() { } @Override - protected UpdateCalendarJobAction.Request createBlankInstance() { - return new UpdateCalendarJobAction.Request(); + protected Writeable.Reader instanceReader() { + return UpdateCalendarJobAction.Request::new; } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateDatafeedActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateDatafeedActionRequestTests.java index 3674a6f7c4b96..97792d1bbad1d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateDatafeedActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateDatafeedActionRequestTests.java @@ -6,11 +6,12 @@ package 
org.elasticsearch.xpack.core.ml.action; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.SearchModule; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.xpack.core.ml.action.UpdateDatafeedAction.Request; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfigTests; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedUpdateTests; @@ -18,7 +19,7 @@ import java.util.Collections; -public class UpdateDatafeedActionRequestTests extends AbstractStreamableXContentTestCase { +public class UpdateDatafeedActionRequestTests extends AbstractSerializingTestCase { private String datafeedId; @@ -33,13 +34,13 @@ protected Request createTestInstance() { } @Override - protected boolean supportsUnknownFields() { - return false; + protected Writeable.Reader instanceReader() { + return Request::new; } @Override - protected Request createBlankInstance() { - return new Request(); + protected boolean supportsUnknownFields() { + return false; } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateFilterActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateFilterActionRequestTests.java index f07eba7e90ebb..e38ee5724df5b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateFilterActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateFilterActionRequestTests.java @@ -5,15 +5,16 @@ */ package org.elasticsearch.xpack.core.ml.action; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.xpack.core.ml.action.UpdateFilterAction.Request; import java.util.ArrayList; import java.util.Collection; import java.util.List; -public class UpdateFilterActionRequestTests extends AbstractStreamableXContentTestCase { +public class UpdateFilterActionRequestTests extends AbstractSerializingTestCase { private String filterId = randomAlphaOfLength(20); @@ -32,6 +33,11 @@ protected Request createTestInstance() { return request; } + @Override + protected Writeable.Reader instanceReader() { + return Request::new; + } + private static Collection generateRandomStrings() { int size = randomIntBetween(0, 10); List strings = new ArrayList<>(size); @@ -46,11 +52,6 @@ protected boolean supportsUnknownFields() { return false; } - @Override - protected Request createBlankInstance() { - return new Request(); - } - @Override protected Request doParseInstance(XContentParser parser) { return Request.parseRequest(filterId, parser); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateJobActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateJobActionRequestTests.java index 20d27f03d0c29..e9beada65dc4f 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateJobActionRequestTests.java +++ 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateJobActionRequestTests.java @@ -5,12 +5,13 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xpack.core.ml.job.config.AnalysisLimits; import org.elasticsearch.xpack.core.ml.job.config.JobUpdate; public class UpdateJobActionRequestTests - extends AbstractStreamableTestCase { + extends AbstractWireSerializingTestCase { @Override protected UpdateJobAction.Request createTestInstance() { @@ -30,8 +31,7 @@ protected UpdateJobAction.Request createTestInstance() { } @Override - protected UpdateJobAction.Request createBlankInstance() { - return new UpdateJobAction.Request(); + protected Writeable.Reader instanceReader() { + return UpdateJobAction.Request::new; } - } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateModelSnapshotActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateModelSnapshotActionRequestTests.java index 8ccb8bb2e1916..f39d308d7cfcc 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateModelSnapshotActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateModelSnapshotActionRequestTests.java @@ -5,12 +5,12 @@ */ package org.elasticsearch.xpack.core.ml.action; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.xpack.core.ml.action.UpdateModelSnapshotAction.Request; -public class UpdateModelSnapshotActionRequestTests - extends AbstractStreamableXContentTestCase { +public class UpdateModelSnapshotActionRequestTests extends AbstractSerializingTestCase { @Override protected Request doParseInstance(XContentParser parser) { @@ -31,12 +31,12 @@ protected Request createTestInstance() { } @Override - protected boolean supportsUnknownFields() { - return false; + protected Writeable.Reader instanceReader() { + return Request::new; } @Override - protected Request createBlankInstance() { - return new Request(); + protected boolean supportsUnknownFields() { + return false; } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateModelSnapshotActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateModelSnapshotActionResponseTests.java index a8694476ee259..5e1e25411ae73 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateModelSnapshotActionResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateModelSnapshotActionResponseTests.java @@ -5,12 +5,13 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xpack.core.ml.action.UpdateModelSnapshotAction.Response; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshotTests; public class UpdateModelSnapshotActionResponseTests - extends AbstractStreamableTestCase { + extends 
AbstractWireSerializingTestCase { @Override protected Response createTestInstance() { @@ -18,7 +19,7 @@ protected Response createTestInstance() { } @Override - protected Response createBlankInstance() { - return new Response(); + protected Writeable.Reader instanceReader() { + return Response::new; } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/ValidateDetectorActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/ValidateDetectorActionRequestTests.java index d49908b1f1bae..993ed466777f5 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/ValidateDetectorActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/ValidateDetectorActionRequestTests.java @@ -5,12 +5,13 @@ */ package org.elasticsearch.xpack.core.ml.action; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.xpack.core.ml.action.ValidateDetectorAction.Request; import org.elasticsearch.xpack.core.ml.job.config.Detector; -public class ValidateDetectorActionRequestTests extends AbstractStreamableXContentTestCase { +public class ValidateDetectorActionRequestTests extends AbstractSerializingTestCase { @Override protected Request createTestInstance() { @@ -24,13 +25,13 @@ protected Request createTestInstance() { } @Override - protected boolean supportsUnknownFields() { - return false; + protected Writeable.Reader instanceReader() { + return Request::new; } @Override - protected Request createBlankInstance() { - return new Request(); + protected boolean supportsUnknownFields() { + return false; } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigActionRequestTests.java index ac2d559c29aa1..f9226b336079d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigActionRequestTests.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.ml.action; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; @@ -13,7 +14,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xpack.core.ml.action.ValidateJobConfigAction.Request; import org.elasticsearch.xpack.core.ml.job.config.Job; @@ -23,7 +24,7 @@ import static org.elasticsearch.xpack.core.ml.job.config.JobTests.buildJobBuilder; import static org.elasticsearch.xpack.core.ml.job.config.JobTests.randomValidJobId; -public class ValidateJobConfigActionRequestTests extends AbstractStreamableTestCase { +public class ValidateJobConfigActionRequestTests extends AbstractWireSerializingTestCase { @Override 
protected Request createTestInstance() { @@ -31,8 +32,8 @@ protected Request createTestInstance() { } @Override - protected Request createBlankInstance() { - return new Request(); + protected Writeable.Reader instanceReader() { + return Request::new; } public void testParseRequest_InvalidCreateSetting() throws IOException { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedTimingStatsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedTimingStatsTests.java index 9ecff4974a751..e56475705eab1 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedTimingStatsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedTimingStatsTests.java @@ -14,14 +14,16 @@ import java.io.IOException; +import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; public class DatafeedTimingStatsTests extends AbstractSerializingTestCase { private static final String JOB_ID = "my-job-id"; public static DatafeedTimingStats createRandom() { - return new DatafeedTimingStats(randomAlphaOfLength(10), randomLong(), randomDouble()); + return new DatafeedTimingStats(randomAlphaOfLength(10), randomLong(), randomLong(), randomDouble()); } @Override @@ -43,10 +45,12 @@ protected DatafeedTimingStats doParseInstance(XContentParser parser) { protected DatafeedTimingStats mutateInstance(DatafeedTimingStats instance) throws IOException { String jobId = instance.getJobId(); long searchCount = instance.getSearchCount(); + long bucketCount = instance.getBucketCount(); double totalSearchTimeMs = instance.getTotalSearchTimeMs(); return new DatafeedTimingStats( jobId + randomAlphaOfLength(5), - searchCount + 1, + searchCount + 2, + bucketCount + 1, totalSearchTimeMs + randomDoubleBetween(1.0, 100.0, true)); } @@ -58,14 +62,16 @@ public void testParse_OptionalFieldsAbsent() throws IOException { DatafeedTimingStats stats = DatafeedTimingStats.PARSER.apply(parser, null); assertThat(stats.getJobId(), equalTo(JOB_ID)); assertThat(stats.getSearchCount(), equalTo(0L)); + assertThat(stats.getBucketCount(), equalTo(0L)); assertThat(stats.getTotalSearchTimeMs(), equalTo(0.0)); + assertThat(stats.getAvgSearchTimePerBucketMs(), nullValue()); } } public void testEquals() { - DatafeedTimingStats stats1 = new DatafeedTimingStats(JOB_ID, 5, 100.0); - DatafeedTimingStats stats2 = new DatafeedTimingStats(JOB_ID, 5, 100.0); - DatafeedTimingStats stats3 = new DatafeedTimingStats(JOB_ID, 5, 200.0); + DatafeedTimingStats stats1 = new DatafeedTimingStats(JOB_ID, 5, 10, 100.0); + DatafeedTimingStats stats2 = new DatafeedTimingStats(JOB_ID, 5, 10, 100.0); + DatafeedTimingStats stats3 = new DatafeedTimingStats(JOB_ID, 5, 10, 200.0); assertTrue(stats1.equals(stats1)); assertTrue(stats1.equals(stats2)); @@ -73,9 +79,9 @@ public void testEquals() { } public void testHashCode() { - DatafeedTimingStats stats1 = new DatafeedTimingStats(JOB_ID, 5, 100.0); - DatafeedTimingStats stats2 = new DatafeedTimingStats(JOB_ID, 5, 100.0); - DatafeedTimingStats stats3 = new DatafeedTimingStats(JOB_ID, 5, 200.0); + DatafeedTimingStats stats1 = new DatafeedTimingStats(JOB_ID, 5, 10, 100.0); + DatafeedTimingStats stats2 = new DatafeedTimingStats(JOB_ID, 5, 10, 100.0); + DatafeedTimingStats stats3 = new DatafeedTimingStats(JOB_ID, 5, 10, 200.0); assertEquals(stats1.hashCode(), stats1.hashCode()); assertEquals(stats1.hashCode(), 
stats2.hashCode()); @@ -83,32 +89,72 @@ public void testHashCode() { } public void testConstructorsAndGetters() { - DatafeedTimingStats stats = new DatafeedTimingStats(JOB_ID, 5, 123.456); + DatafeedTimingStats stats = new DatafeedTimingStats(JOB_ID, 5, 10, 123.456); assertThat(stats.getJobId(), equalTo(JOB_ID)); assertThat(stats.getSearchCount(), equalTo(5L)); + assertThat(stats.getBucketCount(), equalTo(10L)); assertThat(stats.getTotalSearchTimeMs(), equalTo(123.456)); + assertThat(stats.getAvgSearchTimePerBucketMs(), closeTo(12.3456, 1e-9)); stats = new DatafeedTimingStats(JOB_ID); assertThat(stats.getJobId(), equalTo(JOB_ID)); assertThat(stats.getSearchCount(), equalTo(0L)); + assertThat(stats.getBucketCount(), equalTo(0L)); assertThat(stats.getTotalSearchTimeMs(), equalTo(0.0)); + assertThat(stats.getAvgSearchTimePerBucketMs(), nullValue()); } public void testCopyConstructor() { - DatafeedTimingStats stats1 = new DatafeedTimingStats(JOB_ID, 5, 123.456); + DatafeedTimingStats stats1 = new DatafeedTimingStats(JOB_ID, 5, 10, 123.456); DatafeedTimingStats stats2 = new DatafeedTimingStats(stats1); assertThat(stats2.getJobId(), equalTo(JOB_ID)); assertThat(stats2.getSearchCount(), equalTo(5L)); + assertThat(stats2.getBucketCount(), equalTo(10L)); assertThat(stats2.getTotalSearchTimeMs(), equalTo(123.456)); + assertThat(stats2.getAvgSearchTimePerBucketMs(), closeTo(12.3456, 1e-9)); } public void testIncrementTotalSearchTimeMs() { - DatafeedTimingStats stats = new DatafeedTimingStats(JOB_ID, 5, 100.0); + DatafeedTimingStats stats = new DatafeedTimingStats(JOB_ID, 5, 10, 100.0); stats.incrementTotalSearchTimeMs(200.0); assertThat(stats.getJobId(), equalTo(JOB_ID)); assertThat(stats.getSearchCount(), equalTo(6L)); + assertThat(stats.getBucketCount(), equalTo(10L)); assertThat(stats.getTotalSearchTimeMs(), equalTo(300.0)); + assertThat(stats.getAvgSearchTimePerBucketMs(), equalTo(30.0)); + } + + public void testSetBucketCount() { + DatafeedTimingStats stats = new DatafeedTimingStats(JOB_ID, 5, 10, 100.0); + stats.setBucketCount(20); + assertThat(stats.getJobId(), equalTo(JOB_ID)); + assertThat(stats.getSearchCount(), equalTo(5L)); + assertThat(stats.getBucketCount(), equalTo(20L)); + assertThat(stats.getTotalSearchTimeMs(), equalTo(100.0)); + assertThat(stats.getAvgSearchTimePerBucketMs(), equalTo(5.0)); + } + + public void testAvgSearchTimePerBucketIsCalculatedProperlyAfterUpdates() { + DatafeedTimingStats stats = new DatafeedTimingStats(JOB_ID, 5, 10, 100.0); + assertThat(stats.getBucketCount(), equalTo(10L)); + assertThat(stats.getTotalSearchTimeMs(), equalTo(100.0)); + assertThat(stats.getAvgSearchTimePerBucketMs(), equalTo(10.0)); + + stats.setBucketCount(20); + assertThat(stats.getBucketCount(), equalTo(20L)); + assertThat(stats.getTotalSearchTimeMs(), equalTo(100.0)); + assertThat(stats.getAvgSearchTimePerBucketMs(), equalTo(5.0)); + + stats.incrementTotalSearchTimeMs(200.0); + assertThat(stats.getBucketCount(), equalTo(20L)); + assertThat(stats.getTotalSearchTimeMs(), equalTo(300.0)); + assertThat(stats.getAvgSearchTimePerBucketMs(), equalTo(15.0)); + + stats.setBucketCount(25); + assertThat(stats.getBucketCount(), equalTo(25L)); + assertThat(stats.getTotalSearchTimeMs(), equalTo(300.0)); + assertThat(stats.getAvgSearchTimePerBucketMs(), equalTo(12.0)); } public void testDocumentId() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/MeanSquaredErrorTests.java 
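Reviewer note on the DatafeedTimingStats hunks above: the constructor gains a bucketCount argument (jobId, searchCount, bucketCount, totalSearchTimeMs), and the assertions pin down a derived getAvgSearchTimePerBucketMs() that is totalSearchTimeMs divided by bucketCount, returning null rather than 0.0 when no buckets have been processed. A small sketch of just that behavior (the real class also tracks job id and search count, omitted here):

```java
// Sketch of the per-bucket average the new assertions pin down.
class DatafeedTimingSketch {
    private long bucketCount;
    private double totalSearchTimeMs;

    DatafeedTimingSketch(long bucketCount, double totalSearchTimeMs) {
        this.bucketCount = bucketCount;
        this.totalSearchTimeMs = totalSearchTimeMs;
    }

    void setBucketCount(long bucketCount) { this.bucketCount = bucketCount; }

    void incrementTotalSearchTimeMs(double ms) { totalSearchTimeMs += ms; }

    // Boxed Double on purpose: null (not 0.0) when no buckets were processed,
    // matching the nullValue() assertions in the tests above.
    Double getAvgSearchTimePerBucketMs() {
        return bucketCount == 0 ? null : totalSearchTimeMs / bucketCount;
    }
}
```

Tracing the asserted sequence: 10 buckets at 100.0 ms total gives 10.0 ms per bucket; setBucketCount(20) drops it to 5.0; incrementTotalSearchTimeMs(200.0) brings the total to 300.0 and the average to 15.0; setBucketCount(25) lands at 12.0, which are exactly the values in testAvgSearchTimePerBucketIsCalculatedProperlyAfterUpdates.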
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/MeanSquaredErrorTests.java new file mode 100644 index 0000000000000..4351351474761 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/MeanSquaredErrorTests.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.dataframe.evaluation.regression; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.ml.dataframe.evaluation.EvaluationMetricResult; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; + +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class MeanSquaredErrorTests extends AbstractSerializingTestCase { + + @Override + protected MeanSquaredError doParseInstance(XContentParser parser) throws IOException { + return MeanSquaredError.fromXContent(parser); + } + + @Override + protected MeanSquaredError createTestInstance() { + return createRandom(); + } + + @Override + protected Writeable.Reader instanceReader() { + return MeanSquaredError::new; + } + + public static MeanSquaredError createRandom() { + return new MeanSquaredError(); + } + + public void testEvaluate() { + Aggregations aggs = new Aggregations(Arrays.asList( + createSingleMetricAgg("regression_mean_squared_error", 0.8123), + createSingleMetricAgg("some_other_single_metric_agg", 0.2377) + )); + + MeanSquaredError mse = new MeanSquaredError(); + EvaluationMetricResult result = mse.evaluate(aggs); + + String expected = "{\"error\":0.8123}"; + assertThat(Strings.toString(result), equalTo(expected)); + } + + public void testEvaluate_GivenMissingAggs() { + Aggregations aggs = new Aggregations(Collections.singletonList( + createSingleMetricAgg("some_other_single_metric_agg", 0.2377) + )); + + MeanSquaredError mse = new MeanSquaredError(); + EvaluationMetricResult result = mse.evaluate(aggs); + assertThat(result, is(nullValue())); + } + + private static NumericMetricsAggregation.SingleValue createSingleMetricAgg(String name, double value) { + NumericMetricsAggregation.SingleValue agg = mock(NumericMetricsAggregation.SingleValue.class); + when(agg.getName()).thenReturn(name); + when(agg.value()).thenReturn(value); + return agg; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/RSquaredTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/RSquaredTests.java new file mode 100644 index 0000000000000..97ec16494e0e9 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/RSquaredTests.java @@ -0,0 +1,116 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.dataframe.evaluation.regression; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.aggregations.metrics.ExtendedStats; +import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.ml.dataframe.evaluation.EvaluationMetricResult; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; + +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class RSquaredTests extends AbstractSerializingTestCase { + + @Override + protected RSquared doParseInstance(XContentParser parser) throws IOException { + return RSquared.fromXContent(parser); + } + + @Override + protected RSquared createTestInstance() { + return createRandom(); + } + + @Override + protected Writeable.Reader instanceReader() { + return RSquared::new; + } + + public static RSquared createRandom() { + return new RSquared(); + } + + public void testEvaluate() { + Aggregations aggs = new Aggregations(Arrays.asList( + createSingleMetricAgg("residual_sum_of_squares", 10_111), + createExtendedStatsAgg("extended_stats_actual", 155.23, 1000), + createExtendedStatsAgg("some_other_extended_stats",99.1, 10_000), + createSingleMetricAgg("some_other_single_metric_agg", 0.2377) + )); + + RSquared rSquared = new RSquared(); + EvaluationMetricResult result = rSquared.evaluate(aggs); + + String expected = "{\"value\":0.9348643947690524}"; + assertThat(Strings.toString(result), equalTo(expected)); + } + + public void testEvaluateWithZeroCount() { + Aggregations aggs = new Aggregations(Arrays.asList( + createSingleMetricAgg("residual_sum_of_squares", 0), + createExtendedStatsAgg("extended_stats_actual", 0.0, 0), + createExtendedStatsAgg("some_other_extended_stats",99.1, 10_000), + createSingleMetricAgg("some_other_single_metric_agg", 0.2377) + )); + + RSquared rSquared = new RSquared(); + EvaluationMetricResult result = rSquared.evaluate(aggs); + assertThat(result, is(nullValue())); + } + + public void testEvaluate_GivenMissingAggs() { + Aggregations aggs = new Aggregations(Collections.singletonList( + createSingleMetricAgg("some_other_single_metric_agg", 0.2377) + )); + + RSquared rSquared = new RSquared(); + EvaluationMetricResult result = rSquared.evaluate(aggs); + assertThat(result, is(nullValue())); + + aggs = new Aggregations(Arrays.asList( + createSingleMetricAgg("some_other_single_metric_agg", 0.2377), + createSingleMetricAgg("residual_sum_of_squares", 0.2377) + )); + + result = rSquared.evaluate(aggs); + assertThat(result, is(nullValue())); + + aggs = new Aggregations(Arrays.asList( + createSingleMetricAgg("some_other_single_metric_agg", 0.2377), + createExtendedStatsAgg("extended_stats_actual",100, 50) + )); + + result = rSquared.evaluate(aggs); + assertThat(result, is(nullValue())); + } + + private static NumericMetricsAggregation.SingleValue createSingleMetricAgg(String name, double value) { + 
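// Review note (hedged inference, not taken from the sources): the expected output
// 0.9348643947690524 in testEvaluate above is consistent with the standard
// coefficient of determination, R^2 = 1 - SS_res / SS_tot, where SS_res comes from
// the "residual_sum_of_squares" aggregation and SS_tot is recovered from the
// extended stats on the actual field as variance * count:
//   1 - 10_111 / (155.23 * 1_000) = 1 - 10111 / 155230 = 0.93486439...
// With count == 0 the denominator would be zero, so the metric returns null
// instead, which is exactly what testEvaluateWithZeroCount asserts; a missing
// residual or extended-stats aggregation likewise yields null, as the three cases
// in testEvaluate_GivenMissingAggs check.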
NumericMetricsAggregation.SingleValue agg = mock(NumericMetricsAggregation.SingleValue.class); + when(agg.getName()).thenReturn(name); + when(agg.value()).thenReturn(value); + return agg; + } + + private static ExtendedStats createExtendedStatsAgg(String name, double variance, long count) { + ExtendedStats agg = mock(ExtendedStats.class); + when(agg.getName()).thenReturn(name); + when(agg.getVariance()).thenReturn(variance); + when(agg.getCount()).thenReturn(count); + return agg; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/RegressionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/RegressionTests.java new file mode 100644 index 0000000000000..d0bcc1a11f470 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/RegressionTests.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.dataframe.evaluation.regression; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.ml.dataframe.evaluation.MlEvaluationNamedXContentProvider; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; + +public class RegressionTests extends AbstractSerializingTestCase { + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(new MlEvaluationNamedXContentProvider().getNamedWriteables()); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + return new NamedXContentRegistry(new MlEvaluationNamedXContentProvider().getNamedXContentParsers()); + } + + public static Regression createRandom() { + List metrics = new ArrayList<>(); + if (randomBoolean()) { + metrics.add(MeanSquaredErrorTests.createRandom()); + } + if (randomBoolean()) { + metrics.add(RSquaredTests.createRandom()); + } + return new Regression(randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomBoolean() ? + null : + metrics.isEmpty() ? 
+ null : + metrics); + } + + @Override + protected Regression doParseInstance(XContentParser parser) throws IOException { + return Regression.fromXContent(parser); + } + + @Override + protected Regression createTestInstance() { + return createRandom(); + } + + @Override + protected Writeable.Reader instanceReader() { + return Regression::new; + } + + public void testConstructor_GivenEmptyMetrics() { + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, + () -> new Regression("foo", "bar", Collections.emptyList())); + assertThat(e.getMessage(), equalTo("[regression] must have one or more metrics")); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java index ee8d921485996..13ce6f2ab610d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java @@ -10,6 +10,11 @@ import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.JsonToken; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -17,11 +22,13 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.get.GetResult; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedTimingStats; import org.elasticsearch.xpack.core.ml.job.config.Job; @@ -35,6 +42,7 @@ import org.elasticsearch.xpack.core.ml.job.results.CategoryDefinition; import org.elasticsearch.xpack.core.ml.job.results.ReservedFieldNames; import org.elasticsearch.xpack.core.ml.job.results.Result; +import org.mockito.ArgumentCaptor; import java.io.BufferedInputStream; import java.io.ByteArrayInputStream; @@ -48,7 +56,16 @@ import java.util.Map; import java.util.Set; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; public class ElasticsearchMappingsTests extends ESTestCase { @@ -207,6 +224,54 @@ public void testMappingRequiresUpdateNewerMappingVersionMinor() throws IOExcepti 
ElasticsearchMappings.mappingRequiresUpdate(cs, indices, VersionUtils.getPreviousMinorVersion())); } + public void testAddDocMappingIfMissing() throws IOException { + ThreadPool threadPool = mock(ThreadPool.class); + when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); + Client client = mock(Client.class); + when(client.threadPool()).thenReturn(threadPool); + doAnswer( + invocationOnMock -> { + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; + listener.onResponse(new AcknowledgedResponse(true)); + return null; + }) + .when(client).execute(eq(PutMappingAction.INSTANCE), any(), any(ActionListener.class)); + + ClusterState clusterState = getClusterStateWithMappingsWithMetaData(Collections.singletonMap("index-name", "0.0")); + ElasticsearchMappings.addDocMappingIfMissing( + "index-name", + ElasticsearchMappingsTests::fakeMapping, + client, + clusterState, + ActionListener.wrap( + ok -> assertTrue(ok), + e -> fail(e.toString()) + ) + ); + + ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(PutMappingRequest.class); + verify(client).threadPool(); + verify(client).execute(eq(PutMappingAction.INSTANCE), requestCaptor.capture(), any(ActionListener.class)); + verifyNoMoreInteractions(client); + + PutMappingRequest request = requestCaptor.getValue(); + assertThat(request.type(), equalTo("_doc")); + assertThat(request.indices(), equalTo(new String[] { "index-name" })); + assertThat(request.source(), equalTo("{\"_doc\":{\"properties\":{\"some-field\":{\"type\":\"long\"}}}}")); + } + + private static XContentBuilder fakeMapping(String mappingType) throws IOException { + return jsonBuilder() + .startObject() + .startObject(mappingType) + .startObject(ElasticsearchMappings.PROPERTIES) + .startObject("some-field") + .field(ElasticsearchMappings.TYPE, ElasticsearchMappings.LONG) + .endObject() + .endObject() + .endObject() + .endObject(); + } private ClusterState getClusterStateWithMappingsWithMetaData(Map namesAndVersions) throws IOException { MetaData.Builder metaDataBuilder = MetaData.builder(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/TimingStatsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/TimingStatsTests.java index 99e29d01bd724..1a35d0feaedaf 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/TimingStatsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/TimingStatsTests.java @@ -69,6 +69,7 @@ public void testDefaultConstructor() { assertThat(stats.getJobId(), equalTo(JOB_ID)); assertThat(stats.getBucketCount(), equalTo(0L)); + assertThat(stats.getTotalBucketProcessingTimeMs(), equalTo(0.0)); assertThat(stats.getMinBucketProcessingTimeMs(), nullValue()); assertThat(stats.getMaxBucketProcessingTimeMs(), nullValue()); assertThat(stats.getAvgBucketProcessingTimeMs(), nullValue()); @@ -80,6 +81,7 @@ public void testConstructor() { assertThat(stats.getJobId(), equalTo(JOB_ID)); assertThat(stats.getBucketCount(), equalTo(7L)); + assertThat(stats.getTotalBucketProcessingTimeMs(), equalTo(8.61)); assertThat(stats.getMinBucketProcessingTimeMs(), equalTo(1.0)); assertThat(stats.getMaxBucketProcessingTimeMs(), equalTo(2.0)); assertThat(stats.getAvgBucketProcessingTimeMs(), equalTo(1.23)); @@ -92,6 +94,7 @@ public void testCopyConstructor() { assertThat(stats2.getJobId(), equalTo(JOB_ID)); 
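// Review note: the new getTotalBucketProcessingTimeMs() assertions in these
// TimingStats hunks suggest a plain running sum over updateStats(...) calls,
// initialized to 0.0 rather than null (unlike the min/max/avg statistics): in the
// accumulation test below, the inputs 3, 2, 4, 1, 5 produce totals 3.0, 5.0, 9.0,
// 10.0, 15.0. The 8.61 asserted for the 7-bucket constructor case is consistent
// with bucketCount * avg (7 * 1.23 = 8.61), and the copy constructor preserves it
// unchanged.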
assertThat(stats2.getBucketCount(), equalTo(7L)); + assertThat(stats2.getTotalBucketProcessingTimeMs(), equalTo(8.61)); assertThat(stats2.getMinBucketProcessingTimeMs(), equalTo(1.0)); assertThat(stats2.getMaxBucketProcessingTimeMs(), equalTo(2.0)); assertThat(stats2.getAvgBucketProcessingTimeMs(), equalTo(1.23)); @@ -119,6 +122,26 @@ public void testUpdateStats() { assertThat(stats, areCloseTo(new TimingStats(JOB_ID, 5, 1.0, 5.0, 3.0, 3.00029801), 1e-9)); } + public void testTotalBucketProcessingTimeIsCalculatedProperlyAfterUpdates() { + TimingStats stats = new TimingStats(JOB_ID); + assertThat(stats.getTotalBucketProcessingTimeMs(), equalTo(0.0)); + + stats.updateStats(3); + assertThat(stats.getTotalBucketProcessingTimeMs(), equalTo(3.0)); + + stats.updateStats(2); + assertThat(stats.getTotalBucketProcessingTimeMs(), equalTo(5.0)); + + stats.updateStats(4); + assertThat(stats.getTotalBucketProcessingTimeMs(), equalTo(9.0)); + + stats.updateStats(1); + assertThat(stats.getTotalBucketProcessingTimeMs(), equalTo(10.0)); + + stats.updateStats(5); + assertThat(stats.getTotalBucketProcessingTimeMs(), equalTo(15.0)); + } + public void testDocumentId() { assertThat(TimingStats.documentId("my-job-id"), equalTo("my-job-id_timing_stats")); } @@ -138,6 +161,7 @@ private static Matcher areCloseTo(TimingStats operand, double error protected boolean matchesSafely(TimingStats item) { return equalTo(operand.getJobId()).matches(item.getJobId()) && equalTo(operand.getBucketCount()).matches(item.getBucketCount()) + && closeTo(operand.getTotalBucketProcessingTimeMs(), error).matches(item.getTotalBucketProcessingTimeMs()) && closeTo(operand.getMinBucketProcessingTimeMs(), error).matches(item.getMinBucketProcessingTimeMs()) && closeTo(operand.getMaxBucketProcessingTimeMs(), error).matches(item.getMaxBucketProcessingTimeMs()) && closeTo(operand.getAvgBucketProcessingTimeMs(), error).matches(item.getAvgBucketProcessingTimeMs()) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesRequestTests.java index 03232181f930e..186f9dcfe0018 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesRequestTests.java @@ -29,8 +29,7 @@ public void testSerialization() throws IOException { final BytesStreamOutput output = new BytesStreamOutput(); original.writeTo(output); output.flush(); - final DeletePrivilegesRequest copy = new DeletePrivilegesRequest(); - copy.readFrom(output.bytes().streamInput()); + final DeletePrivilegesRequest copy = new DeletePrivilegesRequest(output.bytes().streamInput()); assertThat(copy.application(), equalTo(original.application())); assertThat(copy.privileges(), equalTo(original.privileges())); assertThat(copy.getRefreshPolicy(), equalTo(original.getRefreshPolicy())); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesResponseTests.java index d490177c0cec4..8f8058de50e80 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesResponseTests.java +++ 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesResponseTests.java @@ -23,8 +23,7 @@ public void testSerialization() throws IOException { final BytesStreamOutput output = new BytesStreamOutput(); original.writeTo(output); output.flush(); - final DeletePrivilegesResponse copy = new DeletePrivilegesResponse(); - copy.readFrom(output.bytes().streamInput()); + final DeletePrivilegesResponse copy = new DeletePrivilegesResponse(output.bytes().streamInput()); assertThat(copy.found(), equalTo(original.found())); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/GetBuiltinPrivilegesResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/GetBuiltinPrivilegesResponseTests.java index f52b093d62b63..2a52713926752 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/GetBuiltinPrivilegesResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/GetBuiltinPrivilegesResponseTests.java @@ -22,8 +22,7 @@ public void testSerialization() throws IOException { final BytesStreamOutput out = new BytesStreamOutput(); original.writeTo(out); - final GetBuiltinPrivilegesResponse copy = new GetBuiltinPrivilegesResponse(); - copy.readFrom(out.bytes().streamInput()); + final GetBuiltinPrivilegesResponse copy = new GetBuiltinPrivilegesResponse(out.bytes().streamInput()); assertThat(copy.getClusterPrivileges(), Matchers.equalTo(cluster)); assertThat(copy.getIndexPrivileges(), Matchers.equalTo(index)); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/GetPrivilegesRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/GetPrivilegesRequestTests.java index db867f6775722..35a03d5882157 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/GetPrivilegesRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/GetPrivilegesRequestTests.java @@ -29,8 +29,7 @@ public void testSerialization() throws IOException { final BytesStreamOutput out = new BytesStreamOutput(); original.writeTo(out); - final GetPrivilegesRequest copy = new GetPrivilegesRequest(); - copy.readFrom(out.bytes().streamInput()); + final GetPrivilegesRequest copy = new GetPrivilegesRequest(out.bytes().streamInput()); assertThat(original.application(), Matchers.equalTo(copy.application())); assertThat(original.privileges(), Matchers.equalTo(copy.privileges())); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/GetPrivilegesResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/GetPrivilegesResponseTests.java index fe75dfbd52433..2015b5b522307 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/GetPrivilegesResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/GetPrivilegesResponseTests.java @@ -24,8 +24,7 @@ public void testSerialization() throws IOException { final BytesStreamOutput out = new BytesStreamOutput(); original.writeTo(out); - final GetPrivilegesResponse copy = new GetPrivilegesResponse(); - copy.readFrom(out.bytes().streamInput()); + final GetPrivilegesResponse copy = new 
GetPrivilegesResponse(out.bytes().streamInput()); assertThat(copy.privileges(), Matchers.equalTo(original.privileges())); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestTests.java index e1bdc7687e3e2..f67d62d3bf69b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestTests.java @@ -39,8 +39,7 @@ public void testSerialization() throws IOException { final BytesStreamOutput out = new BytesStreamOutput(); original.writeTo(out); - final PutPrivilegesRequest copy = new PutPrivilegesRequest(); - copy.readFrom(out.bytes().streamInput()); + final PutPrivilegesRequest copy = new PutPrivilegesRequest(out.bytes().streamInput()); assertThat(original.getPrivileges(), Matchers.equalTo(copy.getPrivileges())); assertThat(original.getRefreshPolicy(), Matchers.equalTo(copy.getRefreshPolicy())); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesResponseTests.java index 431d7f326ee88..3c5dc389da7b4 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesResponseTests.java @@ -33,8 +33,7 @@ public void testSerialization() throws IOException { final BytesStreamOutput output = new BytesStreamOutput(); original.writeTo(output); output.flush(); - final PutPrivilegesResponse copy = new PutPrivilegesResponse(); - copy.readFrom(output.bytes().streamInput()); + final PutPrivilegesResponse copy = new PutPrivilegesResponse(output.bytes().streamInput()); assertThat(copy.created(), equalTo(original.created())); assertThat(Strings.toString(copy), equalTo(Strings.toString(original))); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java index 7ca9f4da74ab3..742d8248bd10b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java @@ -63,11 +63,10 @@ public void testSerialization() throws IOException { } original.writeTo(out); - final PutRoleRequest copy = new PutRoleRequest(); final NamedWriteableRegistry registry = new NamedWriteableRegistry(new XPackClientPlugin(Settings.EMPTY).getNamedWriteables()); StreamInput in = new NamedWriteableAwareStreamInput(ByteBufferStreamInput.wrap(BytesReference.toBytes(out.bytes())), registry); in.setVersion(out.getVersion()); - copy.readFrom(in); + final PutRoleRequest copy = new PutRoleRequest(in); assertThat(copy.roleDescriptor(), equalTo(original.roleDescriptor())); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequestTests.java 
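One consequence of constructor-based deserialization, visible in the PutRoleRequest hunk above: any stream setup, such as wrapping in a NamedWriteableAwareStreamInput or calling setVersion, now has to happen before the copy is constructed, because there is no later readFrom() call at which to do it. A small self-contained analogue (plain java.io types stand in for Elasticsearch's stream classes; ExamplePayload is hypothetical):

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class ReaderOrderDemo {

    /** Stand-in for Writeable.Reader: consumes a fully configured stream once. */
    interface Reader<T> { T read(DataInputStream in) throws IOException; }

    static final class ExamplePayload {
        final String role;
        ExamplePayload(DataInputStream in) throws IOException { this.role = in.readUTF(); }
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        new DataOutputStream(buf).writeUTF("superuser");

        // 1. Build and configure the input stream first (in the real tests this is
        //    where the named-writeable wrapping and version setting happen)...
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));

        // 2. ...then construct the copy in one shot via a constructor reference.
        Reader<ExamplePayload> reader = ExamplePayload::new;
        ExamplePayload copy = reader.read(in);

        System.out.println("superuser".equals(copy.role));  // true
    }
}
```

The equality assertions in these security round-trip tests are unchanged; only the construction of the deserialized copy moves from mutate-after-construct to a single constructor call.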
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequestTests.java index 54681e97fc531..29611b1063de6 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequestTests.java @@ -126,8 +126,7 @@ public void testSerialization() throws IOException { try (BytesStreamOutput out = new BytesStreamOutput()) { request.writeTo(out); try (StreamInput in = out.bytes().streamInput()) { - final CreateTokenRequest serialized = new CreateTokenRequest(); - serialized.readFrom(in); + final CreateTokenRequest serialized = new CreateTokenRequest(in); assertEquals(grantType, serialized.getGrantType()); if (scope != null) { assertEquals(scope, serialized.getScope()); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponseTests.java index e89357698c708..16cf2eb2dfe06 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponseTests.java @@ -18,8 +18,7 @@ public void testSerialization() throws Exception { try (BytesStreamOutput output = new BytesStreamOutput()) { response.writeTo(output); try (StreamInput input = output.bytes().streamInput()) { - CreateTokenResponse serialized = new CreateTokenResponse(); - serialized.readFrom(input); + CreateTokenResponse serialized = new CreateTokenResponse(input); assertEquals(response, serialized); } } @@ -29,8 +28,7 @@ public void testSerialization() throws Exception { try (BytesStreamOutput output = new BytesStreamOutput()) { response.writeTo(output); try (StreamInput input = output.bytes().streamInput()) { - CreateTokenResponse serialized = new CreateTokenResponse(); - serialized.readFrom(input); + CreateTokenResponse serialized = new CreateTokenResponse(input); assertEquals(response, serialized); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenResponseTests.java index bbfba920e385a..657799dc735bf 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenResponseTests.java @@ -34,8 +34,7 @@ public void testSerialization() throws IOException { try (BytesStreamOutput output = new BytesStreamOutput()) { response.writeTo(output); try (StreamInput input = output.bytes().streamInput()) { - InvalidateTokenResponse serialized = new InvalidateTokenResponse(); - serialized.readFrom(input); + InvalidateTokenResponse serialized = new InvalidateTokenResponse(input); assertThat(serialized.getResult().getInvalidatedTokens(), equalTo(response.getResult().getInvalidatedTokens())); assertThat(serialized.getResult().getPreviouslyInvalidatedTokens(), equalTo(response.getResult().getPreviouslyInvalidatedTokens())); @@ -51,8 +50,7 @@ public void testSerialization() throws IOException { try (BytesStreamOutput output = new BytesStreamOutput()) { response.writeTo(output); try 
(StreamInput input = output.bytes().streamInput()) { - InvalidateTokenResponse serialized = new InvalidateTokenResponse(); - serialized.readFrom(input); + InvalidateTokenResponse serialized = new InvalidateTokenResponse(input); assertThat(serialized.getResult().getInvalidatedTokens(), equalTo(response.getResult().getInvalidatedTokens())); assertThat(serialized.getResult().getPreviouslyInvalidatedTokens(), equalTo(response.getResult().getPreviouslyInvalidatedTokens())); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponseTests.java index a9e60fff3a167..d9d7f559c4b13 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponseTests.java @@ -48,10 +48,10 @@ public void testSerialization() throws IOException { final BytesStreamOutput out = new BytesStreamOutput(); original.writeTo(out); - final GetUserPrivilegesResponse copy = new GetUserPrivilegesResponse(); + final NamedWriteableRegistry registry = new NamedWriteableRegistry(new XPackClientPlugin(Settings.EMPTY).getNamedWriteables()); StreamInput in = new NamedWriteableAwareStreamInput(ByteBufferStreamInput.wrap(BytesReference.toBytes(out.bytes())), registry); - copy.readFrom(in); + final GetUserPrivilegesResponse copy = new GetUserPrivilegesResponse(in); assertThat(copy.getClusterPrivileges(), equalTo(original.getClusterPrivileges())); assertThat(copy.getConditionalClusterPrivileges(), equalTo(original.getConditionalClusterPrivileges())); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestTests.java index 6dd1d8a25f088..abf1505e38e2e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestTests.java @@ -75,10 +75,10 @@ private HasPrivilegesRequest serializeAndDeserialize(HasPrivilegesRequest origin out.setVersion(version); original.writeTo(out); - final HasPrivilegesRequest copy = new HasPrivilegesRequest(); + final StreamInput in = out.bytes().streamInput(); in.setVersion(version); - copy.readFrom(in); + final HasPrivilegesRequest copy = new HasPrivilegesRequest(in); assertThat(in.read(), equalTo(-1)); return copy; } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java index 4af7dd2e57d62..a21c3655a6a06 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java @@ -204,4 +204,34 @@ public void testIlmPrivileges() { assertThat(predicate.test("indices:admin/whatever"), is(false)); } } + + public void testSlmPrivileges() { + { + Predicate<String> predicate = ClusterPrivilege.MANAGE_SLM.predicate(); + // check cluster actions + assertThat(predicate.test("cluster:admin/slm/delete"),
is(true)); + assertThat(predicate.test("cluster:admin/slm/put"), is(true)); + assertThat(predicate.test("cluster:admin/slm/get"), is(true)); + assertThat(predicate.test("cluster:admin/ilm/start"), is(true)); + assertThat(predicate.test("cluster:admin/ilm/stop"), is(true)); + assertThat(predicate.test("cluster:admin/slm/execute"), is(true)); + assertThat(predicate.test("cluster:admin/ilm/operation_mode/get"), is(true)); + // check non-slm action + assertThat(predicate.test("cluster:admin/whatever"), is(false)); + } + + { + Predicate<String> predicate = ClusterPrivilege.READ_SLM.predicate(); + // check cluster actions + assertThat(predicate.test("cluster:admin/slm/delete"), is(false)); + assertThat(predicate.test("cluster:admin/slm/put"), is(false)); + assertThat(predicate.test("cluster:admin/slm/get"), is(true)); + assertThat(predicate.test("cluster:admin/ilm/start"), is(false)); + assertThat(predicate.test("cluster:admin/ilm/stop"), is(false)); + assertThat(predicate.test("cluster:admin/slm/execute"), is(false)); + assertThat(predicate.test("cluster:admin/ilm/operation_mode/get"), is(true)); + // check non-slm action + assertThat(predicate.test("cluster:admin/whatever"), is(false)); + } + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/snapshotlifecycle/SnapshotInvocationRecordTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/snapshotlifecycle/SnapshotInvocationRecordTests.java new file mode 100644 index 0000000000000..af9511b183e9e --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/snapshotlifecycle/SnapshotInvocationRecordTests.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ + +package org.elasticsearch.xpack.core.snapshotlifecycle; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +public class SnapshotInvocationRecordTests extends AbstractSerializingTestCase<SnapshotInvocationRecord> { + + @Override + protected SnapshotInvocationRecord doParseInstance(XContentParser parser) throws IOException { + return SnapshotInvocationRecord.parse(parser, null); + } + + @Override + protected SnapshotInvocationRecord createTestInstance() { + return randomSnapshotInvocationRecord(); + } + + @Override + protected Writeable.Reader<SnapshotInvocationRecord> instanceReader() { + return SnapshotInvocationRecord::new; + } + + @Override + protected SnapshotInvocationRecord mutateInstance(SnapshotInvocationRecord instance) { + switch (between(0, 2)) { + case 0: + return new SnapshotInvocationRecord( + randomValueOtherThan(instance.getSnapshotName(), () -> randomAlphaOfLengthBetween(2, 10)), + instance.getTimestamp(), + instance.getDetails()); + case 1: + return new SnapshotInvocationRecord(instance.getSnapshotName(), + randomValueOtherThan(instance.getTimestamp(), ESTestCase::randomNonNegativeLong), + instance.getDetails()); + case 2: + return new SnapshotInvocationRecord(instance.getSnapshotName(), + instance.getTimestamp(), + randomValueOtherThan(instance.getDetails(), () -> randomAlphaOfLengthBetween(2, 10))); + default: + throw new AssertionError("failure, got illegal switch case"); + } + } + + public static SnapshotInvocationRecord randomSnapshotInvocationRecord() { + return new SnapshotInvocationRecord( + randomAlphaOfLengthBetween(5, 10), + randomNonNegativeLong(), + randomBoolean() ? null : randomAlphaOfLengthBetween(5, 10)); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/snapshotlifecycle/SnapshotLifecyclePolicyItemTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/snapshotlifecycle/SnapshotLifecyclePolicyItemTests.java new file mode 100644 index 0000000000000..e243a4bd3a1b9 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/snapshotlifecycle/SnapshotLifecyclePolicyItemTests.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ + +package org.elasticsearch.xpack.core.snapshotlifecycle; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.test.ESTestCase; + +import static org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecyclePolicyMetadataTests.createRandomPolicy; +import static org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecyclePolicyMetadataTests.createRandomPolicyMetadata; + +public class SnapshotLifecyclePolicyItemTests extends AbstractWireSerializingTestCase<SnapshotLifecyclePolicyItem> { + + @Override + protected SnapshotLifecyclePolicyItem createTestInstance() { + return new SnapshotLifecyclePolicyItem(createRandomPolicyMetadata(randomAlphaOfLengthBetween(5, 10))); + } + + @Override + protected SnapshotLifecyclePolicyItem mutateInstance(SnapshotLifecyclePolicyItem instance) { + switch (between(0, 4)) { + case 0: + String newPolicyId = randomValueOtherThan(instance.getPolicy().getId(), () -> randomAlphaOfLengthBetween(5, 10)); + return new SnapshotLifecyclePolicyItem(createRandomPolicy(newPolicyId), + instance.getVersion(), + instance.getModifiedDate(), + instance.getLastSuccess(), + instance.getLastFailure()); + case 1: + return new SnapshotLifecyclePolicyItem(instance.getPolicy(), + randomValueOtherThan(instance.getVersion(), ESTestCase::randomNonNegativeLong), + instance.getModifiedDate(), + instance.getLastSuccess(), + instance.getLastFailure()); + case 2: + return new SnapshotLifecyclePolicyItem(instance.getPolicy(), + instance.getVersion(), + randomValueOtherThan(instance.getModifiedDate(), ESTestCase::randomNonNegativeLong), + instance.getLastSuccess(), + instance.getLastFailure()); + case 3: + return new SnapshotLifecyclePolicyItem(instance.getPolicy(), + instance.getVersion(), + instance.getModifiedDate(), + randomValueOtherThan(instance.getLastSuccess(), + SnapshotInvocationRecordTests::randomSnapshotInvocationRecord), + instance.getLastFailure()); + case 4: + return new SnapshotLifecyclePolicyItem(instance.getPolicy(), + instance.getVersion(), + instance.getModifiedDate(), + instance.getLastSuccess(), + randomValueOtherThan(instance.getLastFailure(), + SnapshotInvocationRecordTests::randomSnapshotInvocationRecord)); + default: + throw new AssertionError("failure, got illegal switch case"); + } + } + + @Override + protected Writeable.Reader<SnapshotLifecyclePolicyItem> instanceReader() { + return SnapshotLifecyclePolicyItem::new; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/snapshotlifecycle/SnapshotLifecyclePolicyMetadataTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/snapshotlifecycle/SnapshotLifecyclePolicyMetadataTests.java new file mode 100644 index 0000000000000..39fc692bfc905 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/snapshotlifecycle/SnapshotLifecyclePolicyMetadataTests.java @@ -0,0 +1,116 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ + +package org.elasticsearch.xpack.core.snapshotlifecycle; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotInvocationRecordTests.randomSnapshotInvocationRecord; + +public class SnapshotLifecyclePolicyMetadataTests extends AbstractSerializingTestCase<SnapshotLifecyclePolicyMetadata> { + private String policyId; + + @Override + protected SnapshotLifecyclePolicyMetadata doParseInstance(XContentParser parser) throws IOException { + return SnapshotLifecyclePolicyMetadata.PARSER.apply(parser, policyId); + } + + @Override + protected SnapshotLifecyclePolicyMetadata createTestInstance() { + policyId = randomAlphaOfLength(5); + return createRandomPolicyMetadata(policyId); + } + + private static Map<String, String> randomHeaders() { + Map<String, String> headers = new HashMap<>(); + int headerCount = randomIntBetween(1, 10); + for (int i = 0; i < headerCount; i++) { + headers.put(randomAlphaOfLengthBetween(5, 10), randomAlphaOfLengthBetween(5, 10)); + } + return headers; + } + + @Override + protected Writeable.Reader<SnapshotLifecyclePolicyMetadata> instanceReader() { + return SnapshotLifecyclePolicyMetadata::new; + } + + @Override + protected SnapshotLifecyclePolicyMetadata mutateInstance(SnapshotLifecyclePolicyMetadata instance) throws IOException { + switch (between(0, 5)) { + case 0: + return SnapshotLifecyclePolicyMetadata.builder(instance) + .setPolicy(randomValueOtherThan(instance.getPolicy(), () -> createRandomPolicy(randomAlphaOfLength(10)))) + .build(); + case 1: + return SnapshotLifecyclePolicyMetadata.builder(instance) + .setVersion(randomValueOtherThan(instance.getVersion(), ESTestCase::randomNonNegativeLong)) + .build(); + case 2: + return SnapshotLifecyclePolicyMetadata.builder(instance) + .setModifiedDate(randomValueOtherThan(instance.getModifiedDate(), ESTestCase::randomNonNegativeLong)) + .build(); + case 3: + return SnapshotLifecyclePolicyMetadata.builder(instance) + .setHeaders(randomValueOtherThan(instance.getHeaders(), SnapshotLifecyclePolicyMetadataTests::randomHeaders)) + .build(); + case 4: + return SnapshotLifecyclePolicyMetadata.builder(instance) + .setLastSuccess(randomValueOtherThan(instance.getLastSuccess(), + SnapshotInvocationRecordTests::randomSnapshotInvocationRecord)) + .build(); + case 5: + return SnapshotLifecyclePolicyMetadata.builder(instance) + .setLastFailure(randomValueOtherThan(instance.getLastFailure(), + SnapshotInvocationRecordTests::randomSnapshotInvocationRecord)) + .build(); + default: + throw new AssertionError("failure, got illegal switch case"); + } + } + + public static SnapshotLifecyclePolicyMetadata createRandomPolicyMetadata(String policyId) { + SnapshotLifecyclePolicyMetadata.Builder builder = SnapshotLifecyclePolicyMetadata.builder() + .setPolicy(createRandomPolicy(policyId)) + .setVersion(randomNonNegativeLong()) + .setModifiedDate(randomNonNegativeLong()); + if (randomBoolean()) { + builder.setHeaders(randomHeaders()); + } + if (randomBoolean()) { + builder.setLastSuccess(randomSnapshotInvocationRecord()); + } + if (randomBoolean()) { + builder.setLastFailure(randomSnapshotInvocationRecord()); + } + return builder.build(); + } + + public static SnapshotLifecyclePolicy createRandomPolicy(String policyId) { + Map<String, Object> config = new HashMap<>(); + for (int i = 0; i < randomIntBetween(2, 5); i++) { +
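// a few random key/value pairs stand in for real snapshot settings such as indices or partial +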
config.put(randomAlphaOfLength(4), randomAlphaOfLength(4)); + } + return new SnapshotLifecyclePolicy(policyId, + randomAlphaOfLength(4), + randomSchedule(), + randomAlphaOfLength(4), + config); + } + + private static String randomSchedule() { + return randomIntBetween(0, 59) + " " + + randomIntBetween(0, 59) + " " + + randomIntBetween(0, 12) + " * * ?"; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/snapshotlifecycle/history/SnapshotHistoryItemTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/snapshotlifecycle/history/SnapshotHistoryItemTests.java new file mode 100644 index 0000000000000..0622398e08fbe --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/snapshotlifecycle/history/SnapshotHistoryItemTests.java @@ -0,0 +1,108 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.snapshotlifecycle.history; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; + +public class SnapshotHistoryItemTests extends AbstractSerializingTestCase<SnapshotHistoryItem> { + + @Override + protected SnapshotHistoryItem doParseInstance(XContentParser parser) throws IOException { + return SnapshotHistoryItem.parse(parser, this.getClass().getCanonicalName()); + } + + @Override + protected Writeable.Reader<SnapshotHistoryItem> instanceReader() { + return SnapshotHistoryItem::new; + } + + @Override + protected SnapshotHistoryItem createTestInstance() { + long timestamp = randomNonNegativeLong(); + String policyId = randomAlphaOfLengthBetween(5, 10); + String repository = randomAlphaOfLengthBetween(5, 10); + String snapshotName = randomAlphaOfLengthBetween(5, 10); + String operation = randomAlphaOfLengthBetween(5, 10); + boolean success = randomBoolean(); + Map<String, Object> snapshotConfig = randomSnapshotConfiguration(); + String errorDetails = randomBoolean() ?
null : randomAlphaOfLengthBetween(10, 20); + + return new SnapshotHistoryItem(timestamp, policyId, repository, snapshotName, operation, success, snapshotConfig, + errorDetails); + } + + @Override + protected SnapshotHistoryItem mutateInstance(SnapshotHistoryItem instance) { + final int branch = between(0, 7); + switch (branch) { + case 0: // New timestamp + return new SnapshotHistoryItem( + randomValueOtherThan(instance.getTimestamp(), ESTestCase::randomNonNegativeLong), + instance.getPolicyId(), instance.getRepository(), instance.getSnapshotName(), instance.getOperation(), + instance.isSuccess(), instance.getSnapshotConfiguration(), instance.getErrorDetails()); + case 1: // new policyId + return new SnapshotHistoryItem(instance.getTimestamp(), + randomValueOtherThan(instance.getPolicyId(), () -> randomAlphaOfLengthBetween(5, 10)), + instance.getRepository(), instance.getSnapshotName(), instance.getOperation(), instance.isSuccess(), + instance.getSnapshotConfiguration(), instance.getErrorDetails()); + case 2: // new repo name + return new SnapshotHistoryItem(instance.getTimestamp(), instance.getPolicyId(), + randomValueOtherThan(instance.getRepository(), () -> randomAlphaOfLengthBetween(5, 10)), + instance.getSnapshotName(), + instance.getOperation(), instance.isSuccess(), instance.getSnapshotConfiguration(), instance.getErrorDetails()); + case 3: + return new SnapshotHistoryItem(instance.getTimestamp(), instance.getPolicyId(), instance.getRepository(), + randomValueOtherThan(instance.getSnapshotName(), () -> randomAlphaOfLengthBetween(5, 10)), + instance.getOperation(), instance.isSuccess(), instance.getSnapshotConfiguration(), instance.getErrorDetails()); + case 4: + return new SnapshotHistoryItem(instance.getTimestamp(), instance.getPolicyId(), instance.getRepository(), + instance.getSnapshotName(), + randomValueOtherThan(instance.getOperation(), () -> randomAlphaOfLengthBetween(5, 10)), + instance.isSuccess(), instance.getSnapshotConfiguration(), instance.getErrorDetails()); + case 5: + return new SnapshotHistoryItem(instance.getTimestamp(), instance.getPolicyId(), instance.getRepository(), + instance.getSnapshotName(), + instance.getOperation(), + instance.isSuccess() == false, + instance.getSnapshotConfiguration(), instance.getErrorDetails()); + case 6: + return new SnapshotHistoryItem(instance.getTimestamp(), instance.getPolicyId(), instance.getRepository(), + instance.getSnapshotName(), instance.getOperation(), instance.isSuccess(), + randomValueOtherThan(instance.getSnapshotConfiguration(), + SnapshotHistoryItemTests::randomSnapshotConfiguration), + instance.getErrorDetails()); + case 7: + return new SnapshotHistoryItem(instance.getTimestamp(), instance.getPolicyId(), instance.getRepository(), + instance.getSnapshotName(), instance.getOperation(), instance.isSuccess(), instance.getSnapshotConfiguration(), + randomValueOtherThan(instance.getErrorDetails(), () -> randomAlphaOfLengthBetween(10, 20))); + default: + throw new IllegalArgumentException("illegal randomization: " + branch); + } + } + + public static Map<String, Object> randomSnapshotConfiguration() { + Map<String, Object> configuration = new HashMap<>(); + configuration.put("indices", Arrays.asList(generateRandomStringArray(1, 10, false, false))); + if (frequently()) { + configuration.put("ignore_unavailable", randomBoolean()); + } + if (frequently()) { + configuration.put("include_global_state", randomBoolean()); + } + if (frequently()) { + configuration.put("partial", randomBoolean()); + } + return configuration; + } +} diff --git
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/snapshotlifecycle/history/SnapshotHistoryStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/snapshotlifecycle/history/SnapshotHistoryStoreTests.java new file mode 100644 index 0000000000000..43d069cfa0d9e --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/snapshotlifecycle/history/SnapshotHistoryStoreTests.java @@ -0,0 +1,198 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.snapshotlifecycle.history; + +import org.elasticsearch.action.index.IndexAction; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecyclePolicy; +import org.junit.After; +import org.junit.Before; + +import java.time.Instant; +import java.time.ZoneOffset; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings.SLM_HISTORY_INDEX_ENABLED_SETTING; +import static org.elasticsearch.xpack.core.snapshotlifecycle.history.SnapshotHistoryStore.getHistoryIndexNameForTime; +import static org.elasticsearch.xpack.core.snapshotlifecycle.history.SnapshotLifecycleTemplateRegistry.INDEX_TEMPLATE_VERSION; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.core.IsEqual.equalTo; + +public class SnapshotHistoryStoreTests extends ESTestCase { + + private ThreadPool threadPool; + private SnapshotLifecycleTemplateRegistryTests.VerifyingClient client; + private SnapshotHistoryStore historyStore; + + @Before + public void setup() { + threadPool = new TestThreadPool(this.getClass().getName()); + client = new SnapshotLifecycleTemplateRegistryTests.VerifyingClient(threadPool); + historyStore = new SnapshotHistoryStore(Settings.EMPTY, client, ZoneOffset.UTC); + } + + @After + @Override + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdownNow(); + } + + public void testNoActionIfDisabled() { + Settings settings = Settings.builder().put(SLM_HISTORY_INDEX_ENABLED_SETTING.getKey(), false).build(); + SnapshotHistoryStore disabledHistoryStore = new SnapshotHistoryStore(settings, client, ZoneOffset.UTC); + String policyId = randomAlphaOfLength(5); + SnapshotLifecyclePolicy policy = randomSnapshotLifecyclePolicy(policyId); + final long timestamp = randomNonNegativeLong(); + SnapshotLifecyclePolicy.ResolverContext context = new SnapshotLifecyclePolicy.ResolverContext(timestamp); + String snapshotId = policy.generateSnapshotName(context); + SnapshotHistoryItem record = SnapshotHistoryItem.successRecord(timestamp, policy, snapshotId); + + client.setVerifier((a,r,l) -> { + fail("the history store is disabled, no action should have been taken"); + return null; + }); + disabledHistoryStore.putAsync(record); + } + + @SuppressWarnings("unchecked") + public void testPut() throws Exception { + String policyId = 
randomAlphaOfLength(5); + SnapshotLifecyclePolicy policy = randomSnapshotLifecyclePolicy(policyId); + final long timestamp = randomNonNegativeLong(); + SnapshotLifecyclePolicy.ResolverContext context = new SnapshotLifecyclePolicy.ResolverContext(timestamp); + String snapshotId = policy.generateSnapshotName(context); + { + SnapshotHistoryItem record = SnapshotHistoryItem.successRecord(timestamp, policy, snapshotId); + + AtomicInteger calledTimes = new AtomicInteger(0); + client.setVerifier((action, request, listener) -> { + calledTimes.incrementAndGet(); + assertThat(action, instanceOf(IndexAction.class)); + assertThat(request, instanceOf(IndexRequest.class)); + IndexRequest indexRequest = (IndexRequest) request; + assertEquals(getHistoryIndexNameForTime(Instant.ofEpochMilli(timestamp).atZone(ZoneOffset.UTC)), indexRequest.index()); + final String indexedDocument = indexRequest.source().utf8ToString(); + assertThat(indexedDocument, containsString(policy.getId())); + assertThat(indexedDocument, containsString(policy.getRepository())); + assertThat(indexedDocument, containsString(snapshotId)); + if (policy.getConfig() != null) { + assertContainsMap(indexedDocument, policy.getConfig()); + } + assertNotNull(listener); + // The content of this IndexResponse doesn't matter, so just make it 100% random + return new IndexResponse( + new ShardId(randomAlphaOfLength(5), randomAlphaOfLength(5), randomInt(100)), + randomAlphaOfLength(5), + randomAlphaOfLength(5), + randomLongBetween(1,1000), + randomLongBetween(1,1000), + randomLongBetween(1,1000), + randomBoolean()); + }); + + historyStore.putAsync(record); + assertBusy(() -> assertThat(calledTimes.get(), equalTo(1))); + } + + { + final String cause = randomAlphaOfLength(9); + Exception failureException = new RuntimeException(cause); + SnapshotHistoryItem record = SnapshotHistoryItem.failureRecord(timestamp, policy, snapshotId, failureException); + + AtomicInteger calledTimes = new AtomicInteger(0); + client.setVerifier((action, request, listener) -> { + calledTimes.incrementAndGet(); + assertThat(action, instanceOf(IndexAction.class)); + assertThat(request, instanceOf(IndexRequest.class)); + IndexRequest indexRequest = (IndexRequest) request; + assertEquals(getHistoryIndexNameForTime(Instant.ofEpochMilli(timestamp).atZone(ZoneOffset.UTC)), indexRequest.index()); + final String indexedDocument = indexRequest.source().utf8ToString(); + assertThat(indexedDocument, containsString(policy.getId())); + assertThat(indexedDocument, containsString(policy.getRepository())); + assertThat(indexedDocument, containsString(snapshotId)); + if (policy.getConfig() != null) { + assertContainsMap(indexedDocument, policy.getConfig()); + } + assertThat(indexedDocument, containsString("runtime_exception")); + assertThat(indexedDocument, containsString(cause)); + assertNotNull(listener); + // The content of this IndexResponse doesn't matter, so just make it 100% random + return new IndexResponse( + new ShardId(randomAlphaOfLength(5), randomAlphaOfLength(5), randomInt(100)), + randomAlphaOfLength(5), + randomAlphaOfLength(5), + randomLongBetween(1,1000), + randomLongBetween(1,1000), + randomLongBetween(1,1000), + randomBoolean()); + }); + + historyStore.putAsync(record); + assertBusy(() -> assertThat(calledTimes.get(), equalTo(1))); + } + } + + @SuppressWarnings("unchecked") + private void assertContainsMap(String indexedDocument, Map map) { + map.forEach((k, v) -> { + assertThat(indexedDocument, containsString(k)); + if (v instanceof Map) { + 
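+ // recurse into nested maps so every nested key and value is asserted against the indexed document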
assertContainsMap(indexedDocument, (Map<String, Object>) v); + } else if (v instanceof Iterable) { + ((Iterable<?>) v).forEach(elem -> { + assertThat(indexedDocument, containsString(elem.toString())); + }); + } else { + assertThat(indexedDocument, containsString(v.toString())); + } + }); + } + + + public void testIndexNameGeneration() { + String indexTemplateVersion = INDEX_TEMPLATE_VERSION; + assertThat(getHistoryIndexNameForTime(Instant.ofEpochMilli((long) 0).atZone(ZoneOffset.UTC)), + equalTo(".slm-history-" + indexTemplateVersion + "-1970.01")); + assertThat(getHistoryIndexNameForTime(Instant.ofEpochMilli(100000000000L).atZone(ZoneOffset.UTC)), + equalTo(".slm-history-" + indexTemplateVersion + "-1973.03")); + assertThat(getHistoryIndexNameForTime(Instant.ofEpochMilli(1416582852000L).atZone(ZoneOffset.UTC)), + equalTo(".slm-history-" + indexTemplateVersion + "-2014.11")); + assertThat(getHistoryIndexNameForTime(Instant.ofEpochMilli(2833165811000L).atZone(ZoneOffset.UTC)), + equalTo(".slm-history-" + indexTemplateVersion + "-2059.10")); + } + + public static SnapshotLifecyclePolicy randomSnapshotLifecyclePolicy(String id) { + Map<String, Object> config = null; + if (randomBoolean()) { + config = new HashMap<>(); + for (int i = 0; i < randomIntBetween(2, 5); i++) { + config.put(randomAlphaOfLength(4), randomAlphaOfLength(4)); + } + } + return new SnapshotLifecyclePolicy(id, + randomAlphaOfLength(4), + randomSchedule(), + randomAlphaOfLength(4), + config); + } + + private static String randomSchedule() { + return randomIntBetween(0, 59) + " " + + randomIntBetween(0, 59) + " " + + randomIntBetween(0, 12) + " * * ?"; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/snapshotlifecycle/history/SnapshotLifecycleTemplateRegistryTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/snapshotlifecycle/history/SnapshotLifecycleTemplateRegistryTests.java new file mode 100644 index 0000000000000..d7e10cdb20dfb --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/snapshotlifecycle/history/SnapshotLifecycleTemplateRegistryTests.java @@ -0,0 +1,332 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ + +package org.elasticsearch.xpack.core.snapshotlifecycle.history; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.TriFunction; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ClusterServiceUtils; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.indexlifecycle.DeleteAction; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleType; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; +import org.elasticsearch.xpack.core.indexlifecycle.TimeseriesLifecycleType; +import org.elasticsearch.xpack.core.indexlifecycle.action.PutLifecycleAction; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; + +import static org.elasticsearch.mock.orig.Mockito.when; +import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings.SLM_HISTORY_INDEX_ENABLED_SETTING; +import static org.elasticsearch.xpack.core.snapshotlifecycle.history.SnapshotLifecycleTemplateRegistry.SLM_POLICY_NAME; +import static org.elasticsearch.xpack.core.snapshotlifecycle.history.SnapshotLifecycleTemplateRegistry.SLM_TEMPLATE_NAME; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; + +public class SnapshotLifecycleTemplateRegistryTests extends ESTestCase { + private SnapshotLifecycleTemplateRegistry registry; + private NamedXContentRegistry 
xContentRegistry; + private ClusterService clusterService; + private ThreadPool threadPool; + private VerifyingClient client; + + @Before + public void createRegistryAndClient() { + threadPool = new TestThreadPool(this.getClass().getName()); + client = new VerifyingClient(threadPool); + clusterService = ClusterServiceUtils.createClusterService(threadPool); + List<NamedXContentRegistry.Entry> entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); + entries.addAll(Arrays.asList( + new NamedXContentRegistry.Entry(LifecycleType.class, new ParseField(TimeseriesLifecycleType.TYPE), + (p) -> TimeseriesLifecycleType.INSTANCE), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse))); + xContentRegistry = new NamedXContentRegistry(entries); + registry = new SnapshotLifecycleTemplateRegistry(Settings.EMPTY, clusterService, threadPool, client, xContentRegistry); + } + + @After + @Override + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdownNow(); + } + + public void testDisabledDoesNotAddTemplates() { + Settings settings = Settings.builder().put(SLM_HISTORY_INDEX_ENABLED_SETTING.getKey(), false).build(); + SnapshotLifecycleTemplateRegistry disabledRegistry = new SnapshotLifecycleTemplateRegistry(settings, clusterService, threadPool, + client, xContentRegistry); + assertThat(disabledRegistry.getTemplateConfigs(), hasSize(0)); + assertThat(disabledRegistry.getPolicyConfigs(), hasSize(0)); + } + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/43950") + public void testThatNonExistingTemplatesAreAddedImmediately() throws Exception { + DiscoveryNode node = new DiscoveryNode("node", ESTestCase.buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build(); + + ClusterChangedEvent event = createClusterChangedEvent(Collections.emptyList(), nodes); + + AtomicInteger calledTimes = new AtomicInteger(0); + client.setVerifier((action, request, listener) -> { + if (action instanceof PutIndexTemplateAction) { + calledTimes.incrementAndGet(); + assertThat(action, instanceOf(PutIndexTemplateAction.class)); + assertThat(request, instanceOf(PutIndexTemplateRequest.class)); + final PutIndexTemplateRequest putRequest = (PutIndexTemplateRequest) request; + assertThat(putRequest.name(), equalTo(SLM_TEMPLATE_NAME)); + assertThat(putRequest.settings().get("index.lifecycle.name"), equalTo(SLM_POLICY_NAME)); + assertNotNull(listener); + return new TestPutIndexTemplateResponse(true); + } else if (action instanceof PutLifecycleAction) { + // Ignore this, it's verified in another test + return new PutLifecycleAction.Response(true); + } else { + fail("client called with unexpected request:" + request.toString()); + return null; + } + }); + registry.clusterChanged(event); + assertBusy(() -> assertThat(calledTimes.get(), equalTo(registry.getTemplateConfigs().size()))); + + calledTimes.set(0); + // now delete one template from the cluster state and let's retry + ClusterChangedEvent newEvent = createClusterChangedEvent(Collections.emptyList(), nodes); + registry.clusterChanged(newEvent); + assertBusy(() -> assertThat(calledTimes.get(), equalTo(1))); + } + + public void testThatNonExistingPoliciesAreAddedImmediately() throws Exception { + DiscoveryNode node = new DiscoveryNode("node", ESTestCase.buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNodes nodes =
DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build(); + + AtomicInteger calledTimes = new AtomicInteger(0); + client.setVerifier((action, request, listener) -> { + if (action instanceof PutLifecycleAction) { + calledTimes.incrementAndGet(); + assertThat(action, instanceOf(PutLifecycleAction.class)); + assertThat(request, instanceOf(PutLifecycleAction.Request.class)); + final PutLifecycleAction.Request putRequest = (PutLifecycleAction.Request) request; + assertThat(putRequest.getPolicy().getName(), equalTo(SLM_POLICY_NAME)); + assertNotNull(listener); + return new PutLifecycleAction.Response(true); + } else if (action instanceof PutIndexTemplateAction) { + // Ignore this, it's verified in another test + return new TestPutIndexTemplateResponse(true); + } else { + fail("client called with unexpected request:" + request.toString()); + return null; + } + }); + + ClusterChangedEvent event = createClusterChangedEvent(Collections.emptyList(), nodes); + registry.clusterChanged(event); + assertBusy(() -> assertThat(calledTimes.get(), equalTo(1))); + } + + public void testPolicyAlreadyExists() { + DiscoveryNode node = new DiscoveryNode("node", ESTestCase.buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build(); + + Map<String, LifecyclePolicy> policyMap = new HashMap<>(); + List<LifecyclePolicy> policies = registry.getPolicyConfigs().stream() + .map(policyConfig -> policyConfig.load(xContentRegistry)) + .collect(Collectors.toList()); + assertThat(policies, hasSize(1)); + LifecyclePolicy policy = policies.get(0); + policyMap.put(policy.getName(), policy); + + client.setVerifier((action, request, listener) -> { + if (action instanceof PutIndexTemplateAction) { + // Ignore this, it's verified in another test + return new TestPutIndexTemplateResponse(true); + } else if (action instanceof PutLifecycleAction) { + fail("if the policy already exists it should not be re-put"); + } else { + fail("client called with unexpected request:" + request.toString()); + } + return null; + }); + + ClusterChangedEvent event = createClusterChangedEvent(Collections.emptyList(), policyMap, nodes); + registry.clusterChanged(event); + } + + public void testPolicyAlreadyExistsButDiffers() throws IOException { + DiscoveryNode node = new DiscoveryNode("node", ESTestCase.buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build(); + + Map<String, LifecyclePolicy> policyMap = new HashMap<>(); + String policyStr = "{\"phases\":{\"delete\":{\"min_age\":\"1m\",\"actions\":{\"delete\":{}}}}}"; + List<LifecyclePolicy> policies = registry.getPolicyConfigs().stream() + .map(policyConfig -> policyConfig.load(xContentRegistry)) + .collect(Collectors.toList()); + assertThat(policies, hasSize(1)); + LifecyclePolicy policy = policies.get(0); + + client.setVerifier((action, request, listener) -> { + if (action instanceof PutIndexTemplateAction) { + // Ignore this, it's verified in another test + return new TestPutIndexTemplateResponse(true); + } else if (action instanceof PutLifecycleAction) { + fail("if the policy already exists it should not be re-put"); + } else { + fail("client called with unexpected request:" + request.toString()); + } + return null; + }); + + try (XContentParser parser = XContentType.JSON.xContent() + .createParser(xContentRegistry, LoggingDeprecationHandler.THROW_UNSUPPORTED_OPERATION, policyStr)) { + LifecyclePolicy different = LifecyclePolicy.parse(parser, policy.getName()); +
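// register the differing policy in the cluster state; the registry is expected to leave it untouched +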
policyMap.put(policy.getName(), different); + ClusterChangedEvent event = createClusterChangedEvent(Collections.emptyList(), policyMap, nodes); + registry.clusterChanged(event); + } + } + + public void testThatMissingMasterNodeDoesNothing() { + DiscoveryNode localNode = new DiscoveryNode("node", ESTestCase.buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").add(localNode).build(); + + client.setVerifier((a,r,l) -> { + fail("if the master is missing nothing should happen"); + return null; + }); + + ClusterChangedEvent event = createClusterChangedEvent(Arrays.asList(SLM_TEMPLATE_NAME), nodes); + registry.clusterChanged(event); + } + + public void testValidate() { + assertFalse(registry.validate(createClusterState(Settings.EMPTY, Collections.emptyList(), Collections.emptyMap(), null))); + assertFalse(registry.validate(createClusterState(Settings.EMPTY, List.of(SLM_TEMPLATE_NAME), Collections.emptyMap(), null))); + + Map<String, LifecyclePolicy> policyMap = new HashMap<>(); + policyMap.put(SLM_POLICY_NAME, new LifecyclePolicy(SLM_POLICY_NAME, new HashMap<>())); + assertFalse(registry.validate(createClusterState(Settings.EMPTY, Collections.emptyList(), policyMap, null))); + + assertTrue(registry.validate(createClusterState(Settings.EMPTY, List.of(SLM_TEMPLATE_NAME), policyMap, null))); + } + + // ------------- + + /** + * A client that delegates to a verifying function for action/request/listener + */ + public static class VerifyingClient extends NoOpClient { + + private TriFunction<ActionType<?>, ActionRequest, ActionListener<?>, ActionResponse> verifier = (a, r, l) -> { + fail("verifier not set"); + return null; + }; + + VerifyingClient(ThreadPool threadPool) { + super(threadPool); + } + + @Override + @SuppressWarnings("unchecked") + protected <Request extends ActionRequest, Response extends ActionResponse> void doExecute(ActionType<Response> action, + Request request, + ActionListener<Response> listener) { + listener.onResponse((Response) verifier.apply(action, request, listener)); + } + + public VerifyingClient setVerifier(TriFunction<ActionType<?>, ActionRequest, ActionListener<?>, ActionResponse> verifier) { + this.verifier = verifier; + return this; + } + } + + private ClusterChangedEvent createClusterChangedEvent(List<String> existingTemplateNames, DiscoveryNodes nodes) { + return createClusterChangedEvent(existingTemplateNames, Collections.emptyMap(), nodes); + } + + private ClusterChangedEvent createClusterChangedEvent(List<String> existingTemplateNames, + Map<String, LifecyclePolicy> existingPolicies, + DiscoveryNodes nodes) { + ClusterState cs = createClusterState(Settings.EMPTY, existingTemplateNames, existingPolicies, nodes); + ClusterChangedEvent realEvent = new ClusterChangedEvent("created-from-test", cs, + ClusterState.builder(new ClusterName("test")).build()); + ClusterChangedEvent event = spy(realEvent); + when(event.localNodeMaster()).thenReturn(nodes.isLocalNodeElectedMaster()); + + return event; + } + + private ClusterState createClusterState(Settings nodeSettings, + List<String> existingTemplateNames, + Map<String, LifecyclePolicy> existingPolicies, + DiscoveryNodes nodes) { + ImmutableOpenMap.Builder<String, IndexTemplateMetaData> indexTemplates = ImmutableOpenMap.builder(); + for (String name : existingTemplateNames) { + indexTemplates.put(name, mock(IndexTemplateMetaData.class)); + } + + Map<String, LifecyclePolicyMetadata> existingILMMeta = existingPolicies.entrySet().stream() + .collect(Collectors.toMap(Map.Entry::getKey, e -> new LifecyclePolicyMetadata(e.getValue(), Collections.emptyMap(), 1, 1))); + IndexLifecycleMetadata ilmMeta = new IndexLifecycleMetadata(existingILMMeta, OperationMode.RUNNING); + + return ClusterState.builder(new ClusterName("test")) +
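// attach the mock templates, node settings, and ILM policy metadata that the registry inspects +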
.metaData(MetaData.builder() + .templates(indexTemplates.build()) + .transientSettings(nodeSettings) + .putCustom(IndexLifecycleMetadata.TYPE, ilmMeta) + .build()) + .blocks(new ClusterBlocks.Builder().build()) + .nodes(nodes) + .build(); + } + + private static class TestPutIndexTemplateResponse extends AcknowledgedResponse { + TestPutIndexTemplateResponse(boolean acknowledged) { + super(acknowledged); + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/spatial/SpatialFeatureSetUsageTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/spatial/SpatialFeatureSetUsageTests.java new file mode 100644 index 0000000000000..27b0d82ac5b9d --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/spatial/SpatialFeatureSetUsageTests.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.spatial; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import java.io.IOException; + +public class SpatialFeatureSetUsageTests extends AbstractWireSerializingTestCase<SpatialFeatureSetUsage> { + + @Override + protected SpatialFeatureSetUsage createTestInstance() { + boolean available = randomBoolean(); + boolean enabled = randomBoolean(); + return new SpatialFeatureSetUsage(available, enabled); + } + + @Override + protected SpatialFeatureSetUsage mutateInstance(SpatialFeatureSetUsage instance) throws IOException { + boolean available = instance.available(); + boolean enabled = instance.enabled(); + switch (between(0, 1)) { + case 0: + available = available == false; + break; + case 1: + enabled = enabled == false; + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new SpatialFeatureSetUsage(available, enabled); + } + + @Override + protected Writeable.Reader<SpatialFeatureSetUsage> instanceReader() { + return SpatialFeatureSetUsage::new; + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoServiceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoServiceTests.java index e1f0181e7dd00..79e68e03c3a5e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoServiceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoServiceTests.java @@ -54,6 +54,11 @@ public void testEncryptedChar() throws Exception { assertThat(service.isEncrypted(service.encrypt(randomAlphaOfLength(10).toCharArray())), is(true)); } + public void testErrorMessageWhenSecureEncryptionKeySettingDoesNotExist() throws Exception { + final ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> new CryptoService(Settings.EMPTY)); + assertThat(e.getMessage(), is("setting [" + WatcherField.ENCRYPTION_KEY_SETTING.getKey() + "] must be set in keystore")); + } + public static byte[] generateKey() { try { KeyGenerator generator = KeyGenerator.getInstance(CryptoService.KEY_ALGO); diff --git a/x-pack/plugin/data-frame/qa/multi-node-tests/build.gradle b/x-pack/plugin/data-frame/qa/multi-node-tests/build.gradle index 9fcf079668780..7708f627c535e 100644 --- a/x-pack/plugin/data-frame/qa/multi-node-tests/build.gradle +++
b/x-pack/plugin/data-frame/qa/multi-node-tests/build.gradle @@ -29,7 +29,7 @@ processTestResources.dependsOn(copyKeyCerts) integTest.dependsOn copyKeyCerts testClusters.integTest { - distribution = 'DEFAULT' + testDistribution = 'DEFAULT' setting 'xpack.security.enabled', 'true' setting 'xpack.license.self_generated.type', 'trial' setting 'xpack.monitoring.enabled', 'false' diff --git a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java index a1d8aca86ba16..09b34f7ed7b20 100644 --- a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java +++ b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java @@ -210,6 +210,7 @@ protected DataFrameTransformConfig.Builder createTransformConfigBuilder(String i .setId(id) .setSource(SourceConfig.builder().setIndex(sourceIndices).setQueryConfig(createQueryConfig(queryBuilder)).build()) .setDest(DestConfig.builder().setIndex(destinationIndex).build()) + .setFrequency(TimeValue.timeValueSeconds(10)) .setPivotConfig(createPivotConfig(groups, aggregations)) .setDescription("Test data frame transform config id: " + id); } diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/build.gradle b/x-pack/plugin/data-frame/qa/single-node-tests/build.gradle index a33e73ba4c318..304b05d2f7c73 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/build.gradle +++ b/x-pack/plugin/data-frame/qa/single-node-tests/build.gradle @@ -12,7 +12,7 @@ dependencies { } testClusters.integTest { - distribution = 'DEFAULT' + testDistribution = 'DEFAULT' setting 'xpack.security.enabled', 'true' setting 'xpack.license.self_generated.type', 'trial' user username: "x_pack_rest_user", password: "x-pack-test-password" diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java index a45fee6d9666f..fd6d21db045b5 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java @@ -214,6 +214,7 @@ public void testGetProgressResetWithContinuous() throws Exception { final Request createDataframeTransformRequest = createRequestWithAuth("PUT", DATAFRAME_ENDPOINT + transformId, null); String config = "{ \"dest\": {\"index\":\"" + transformDest + "\"}," + " \"source\": {\"index\":\"" + transformSrc + "\"}," + + " \"frequency\": \"1s\"," + " \"sync\": {\"time\":{\"field\": \"timestamp\", \"delay\": \"1s\"}}," + " \"pivot\": {" + " \"group_by\": {" diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java index f2cd95ed1a9c7..4fb8ea6fafdd6 100644 --- 
a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java @@ -141,6 +141,7 @@ public void testContinuousPivot() throws Exception { String config = "{" + " \"source\": {\"index\":\"" + indexName + "\"}," + " \"dest\": {\"index\":\"" + dataFrameIndex + "\"}," + + " \"frequency\": \"1s\"," + " \"sync\": {\"time\": {\"field\": \"timestamp\", \"delay\": \"1s\"}}," + " \"pivot\": {" + " \"group_by\": {" diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java index c8d7bf28842e3..b8c6744b90d3c 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java @@ -51,11 +51,9 @@ protected Settings restClientSettings() { return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", BASIC_AUTH_VALUE_SUPER_USER).build(); } - protected void createReviewsIndex(String indexName) throws IOException { + protected void createReviewsIndex(String indexName, int numDocs) throws IOException { int[] distributionTable = {5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 2, 1, 1, 1}; - final int numDocs = 1000; - // create mapping try (XContentBuilder builder = jsonBuilder()) { builder.startObject(); @@ -146,6 +144,10 @@ protected void createReviewsIndex() throws IOException { createReviewsIndex(REVIEWS_INDEX_NAME); } + protected void createReviewsIndex(String indexName) throws IOException { + createReviewsIndex(indexName, 1000); + } + protected void createPivotReviewsTransform(String transformId, String dataFrameIndex, String query) throws IOException { createPivotReviewsTransform(transformId, dataFrameIndex, query, null); } @@ -161,7 +163,9 @@ protected void createContinuousPivotReviewsTransform(String transformId, String String config = "{ \"dest\": {\"index\":\"" + dataFrameIndex + "\"}," + " \"source\": {\"index\":\"" + REVIEWS_INDEX_NAME + "\"}," + //Set frequency high for testing + " \"sync\": {\"time\":{\"field\": \"timestamp\", \"delay\": \"15m\"}}," + + " \"frequency\": \"1s\"," + " \"pivot\": {" + " \"group_by\": {" + " \"reviewer\": {" @@ -210,7 +214,8 @@ protected void createPivotReviewsTransform(String transformId, String dataFrameI + " \"avg_rating\": {" + " \"avg\": {" + " \"field\": \"stars\"" - + " } } } }" + + " } } } }," + + "\"frequency\":\"1s\"" + "}"; createDataframeTransformRequest.setJsonEntity(config); diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java index 96aeeda8755f4..0bf668ddadde6 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java @@ -6,87 +6,124 @@ 
package org.elasticsearch.xpack.dataframe.integration; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.elasticsearch.client.Request; import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; +import org.junit.After; +import org.junit.Before; import java.io.IOException; import java.util.List; import java.util.Map; +import java.util.concurrent.TimeUnit; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.oneOf; public class DataFrameTaskFailedStateIT extends DataFrameRestTestCase { - public void testDummy() { - // remove once the awaits fix below is resolved + private static final String TRANSFORM_ID = "failure_pivot_1"; + + @Before + public void setClusterSettings() throws IOException { + // Make sure we never retry on failure to speed up the test + Request addFailureRetrySetting = new Request("PUT", "/_cluster/settings"); + addFailureRetrySetting.setJsonEntity( + "{\"persistent\": {\"xpack.data_frame.num_transform_failure_retries\": \"" + 0 + "\"}}"); + client().performRequest(addFailureRetrySetting); } - @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/40543") - public void testFailureStateInteraction() throws Exception { - createReviewsIndex(); - String transformId = "failure_pivot_1"; - String dataFrameIndex = "failure_pivot_reviews"; - createPivotReviewsTransform(transformId, dataFrameIndex, null); - deleteIndex(REVIEWS_INDEX_NAME); // trigger start failure due to index missing - startDataframeTransform(transformId, false); - awaitState(transformId, DataFrameTransformTaskState.FAILED); - Map fullState = getDataFrameState(transformId); + @After + public void cleanUpPotentiallyFailedTransform() throws Exception { + // If the tests failed in the middle, we should force stop it. This prevents other transform tests from failing due + // to this left over transform + stopDataFrameTransform(TRANSFORM_ID, true); + deleteDataFrameTransform(TRANSFORM_ID); + } + public void testForceStopFailedTransform() throws Exception { + createReviewsIndex(REVIEWS_INDEX_NAME, 10); + String dataFrameIndex = "failure_pivot_reviews"; + createDestinationIndexWithBadMapping(dataFrameIndex); + createContinuousPivotReviewsTransform(TRANSFORM_ID, dataFrameIndex, null); + startDataframeTransform(TRANSFORM_ID, false); + awaitState(TRANSFORM_ID, DataFrameTransformTaskState.FAILED); + Map fullState = getDataFrameState(TRANSFORM_ID); + final String failureReason = "task encountered more than 0 failures; latest failure: " + + "Bulk index experienced failures. 
See the logs of the node running the transform for details."; // Verify we have failed for the expected reason assertThat(XContentMapValues.extractValue("state.reason", fullState), - equalTo("task encountered irrecoverable failure: no such index [reviews]")); - assertThat(XContentMapValues.extractValue("state.indexer_state", fullState), equalTo("started")); + equalTo(failureReason)); - // Verify that we cannot stop or start the transform when the task is in a failed state - ResponseException ex = expectThrows(ResponseException.class, () -> stopDataFrameTransform(transformId, false)); + // verify that we cannot stop a failed transform + ResponseException ex = expectThrows(ResponseException.class, () -> stopDataFrameTransform(TRANSFORM_ID, false)); assertThat(ex.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.CONFLICT.getStatus())); assertThat(XContentMapValues.extractValue("error.reason", entityAsMap(ex.getResponse())), - equalTo("Unable to stop data frame transform [failure_pivot_1] as it is in a failed state with reason: [" + - "task encountered irrecoverable failure: no such index [reviews]]. Use force stop to stop the data frame transform.")); + equalTo("Unable to stop data frame transform [failure_pivot_1] as it is in a failed state with reason [" + + failureReason + + "]. Use force stop to stop the data frame transform.")); + + // Verify that we can force stop a failed transform + stopDataFrameTransform(TRANSFORM_ID, true); + + awaitState(TRANSFORM_ID, DataFrameTransformTaskState.STOPPED); + fullState = getDataFrameState(TRANSFORM_ID); + assertThat(XContentMapValues.extractValue("state.reason", fullState), + is(nullValue())); + } + - ex = expectThrows(ResponseException.class, () -> startDataframeTransform(transformId, false)); + public void testForceStartFailedTransform() throws Exception { + createReviewsIndex(REVIEWS_INDEX_NAME, 10); + String dataFrameIndex = "failure_pivot_reviews"; + createDestinationIndexWithBadMapping(dataFrameIndex); + createContinuousPivotReviewsTransform(TRANSFORM_ID, dataFrameIndex, null); + startDataframeTransform(TRANSFORM_ID, false); + awaitState(TRANSFORM_ID, DataFrameTransformTaskState.FAILED); + Map fullState = getDataFrameState(TRANSFORM_ID); + final String failureReason = "task encountered more than 0 failures; latest failure: " + + "Bulk index experienced failures. See the logs of the node running the transform for details."; + // Verify we have failed for the expected reason + assertThat(XContentMapValues.extractValue("state.reason", fullState), + equalTo(failureReason)); + + // Verify that we cannot start the transform when the task is in a failed state + ResponseException ex = expectThrows(ResponseException.class, () -> startDataframeTransform(TRANSFORM_ID, false)); assertThat(ex.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.CONFLICT.getStatus())); assertThat(XContentMapValues.extractValue("error.reason", entityAsMap(ex.getResponse())), equalTo("Unable to start data frame transform [failure_pivot_1] as it is in a failed state with failure: [" + - "task encountered irrecoverable failure: no such index [reviews]]. " + - "Use force start to restart data frame transform once error is resolved.")); + failureReason + + "]. 
Use force start to restart data frame transform once error is resolved.")); - // Correct the failure by creating the reviews index again - createReviewsIndex(); + // Correct the failure by deleting the destination index + deleteIndex(dataFrameIndex); // Force start the data frame to indicate failure correction - startDataframeTransform(transformId, true); - // Wait for data to be indexed appropriately and refresh for search - waitForDataFrameCheckpoint(transformId); - refreshIndex(dataFrameIndex); + startDataframeTransform(TRANSFORM_ID, true); // Verify that we have started and that our reason is cleared - fullState = getDataFrameState(transformId); + fullState = getDataFrameState(TRANSFORM_ID); assertThat(XContentMapValues.extractValue("state.reason", fullState), is(nullValue())); assertThat(XContentMapValues.extractValue("state.task_state", fullState), equalTo("started")); - assertThat(XContentMapValues.extractValue("state.indexer_state", fullState), equalTo("started")); - assertThat(XContentMapValues.extractValue("stats.search_failures", fullState), equalTo(1)); + assertThat(XContentMapValues.extractValue("state.indexer_state", fullState), is(oneOf("started", "indexing"))); + assertThat(XContentMapValues.extractValue("stats.index_failures", fullState), equalTo(1)); - // get and check some users to verify we restarted - assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_0", 3.776978417); - assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_5", 3.72); - assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_11", 3.846153846); - assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_20", 3.769230769); - assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_26", 3.918918918); - - - stopDataFrameTransform(transformId, true); - deleteDataFrameTransform(transformId); + stopDataFrameTransform(TRANSFORM_ID, true); } private void awaitState(String transformId, DataFrameTransformTaskState state) throws Exception { assertBusy(() -> { String currentState = getDataFrameTaskState(transformId); - assertThat(state.value(), equalTo(currentState)); - }); + assertThat(currentState, equalTo(state.value())); + }, 180, TimeUnit.SECONDS); // It should not take this long, but if the scheduler gets deferred, it could } private void assertOnePivotValue(String query, double expected) throws IOException { @@ -96,4 +133,25 @@ private void assertOnePivotValue(String query, double expected) throws IOExcepti double actual = (Double) ((List) XContentMapValues.extractValue("hits.hits._source.avg_rating", searchResult)).get(0); assertEquals(expected, actual, 0.000001); } + + private void createDestinationIndexWithBadMapping(String indexName) throws IOException { + // create mapping + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + { + builder.startObject("mappings") + .startObject("properties") + .startObject("reviewer") + .field("type", "long") + .endObject() + .endObject() + .endObject(); + } + builder.endObject(); + final StringEntity entity = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); + Request req = new Request("PUT", indexName); + req.setEntity(entity); + client().performRequest(req); + } + } } diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java index 
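Taken together, the rewritten DataFrameTaskFailedStateIT above exercises the new failure lifecycle end to end: fail fast by zeroing the retry budget, assert that a plain stop or start is rejected with 409 CONFLICT, then recover through the force variants. The skeleton of that flow, assuming the DataFrameRestTestCase helpers shown in this diff:

    // zero retries so the first bulk-index failure moves the task to FAILED
    Request zeroRetries = new Request("PUT", "/_cluster/settings");
    zeroRetries.setJsonEntity(
        "{\"persistent\": {\"xpack.data_frame.num_transform_failure_retries\": \"0\"}}");
    client().performRequest(zeroRetries);

    startDataframeTransform("failure_pivot_1", false);  // plain start
    // ... bulk indexing hits the intentionally bad destination mapping -> FAILED ...
    stopDataFrameTransform("failure_pivot_1", true);    // force=true is the only way out,
                                                        // or a force start once the cause is fixed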
002126e5b60e8..5c1efb48875e7 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java @@ -131,6 +131,7 @@ public void testGetProgress() throws Exception { destConfig, null, null, + null, pivotConfig, null); @@ -155,6 +156,7 @@ public void testGetProgress() throws Exception { destConfig, null, null, + null, pivotConfig, null); @@ -174,6 +176,7 @@ public void testGetProgress() throws Exception { destConfig, null, null, + null, pivotConfig, null); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java index f83a546f12510..4494ee7201044 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.common.settings.SettingsModule; @@ -72,6 +73,7 @@ import org.elasticsearch.xpack.dataframe.rest.action.RestStartDataFrameTransformAction; import org.elasticsearch.xpack.dataframe.rest.action.RestStopDataFrameTransformAction; import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformPersistentTasksExecutor; +import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformTask; import java.io.IOException; import java.time.Clock; @@ -205,8 +207,19 @@ public List> getPersistentTasksExecutor(ClusterServic assert dataFrameAuditor.get() != null; assert dataFrameTransformsCheckpointService.get() != null; - return Collections.singletonList(new DataFrameTransformPersistentTasksExecutor(client, dataFrameTransformsConfigManager.get(), - dataFrameTransformsCheckpointService.get(), schedulerEngine.get(), dataFrameAuditor.get(), threadPool)); + return Collections.singletonList( + new DataFrameTransformPersistentTasksExecutor(client, + dataFrameTransformsConfigManager.get(), + dataFrameTransformsCheckpointService.get(), + schedulerEngine.get(), + dataFrameAuditor.get(), + threadPool, + clusterService, + settingsModule.getSettings())); + } + + public List> getSettings() { + return Collections.singletonList(DataFrameTransformTask.NUM_FAILURE_RETRIES_SETTING); } @Override diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportDeleteDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportDeleteDataFrameTransformAction.java index 3c6a9dda611c6..25f8550766f2a 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportDeleteDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportDeleteDataFrameTransformAction.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.AcknowledgedResponse; import 
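DataFrame.getSettings() above registers the retry budget as a plugin setting, and the persistent tasks executor (later in this diff) subscribes to updates through ClusterSettings#addSettingsUpdateConsumer, which makes the limit dynamic. The declaration itself is not visible in this diff; a plausible shape, with placeholder default and bounds, would be:

    // hypothetical sketch -- the real default and bounds live in DataFrameTransformTask
    public static final Setting<Integer> NUM_FAILURE_RETRIES_SETTING = Setting.intSetting(
        "xpack.data_frame.num_transform_failure_retries",
        10,  // placeholder default
        0,   // minimum: fail on the first error, as the tests above configure
        Setting.Property.NodeScope, Setting.Property.Dynamic);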
org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -24,24 +25,31 @@ import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.dataframe.action.DeleteDataFrameTransformAction; import org.elasticsearch.xpack.core.dataframe.action.DeleteDataFrameTransformAction.Request; +import org.elasticsearch.xpack.core.dataframe.action.StopDataFrameTransformAction; import org.elasticsearch.xpack.dataframe.notifications.DataFrameAuditor; import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; import java.io.IOException; +import static org.elasticsearch.xpack.core.ClientHelper.DATA_FRAME_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; + public class TransportDeleteDataFrameTransformAction extends TransportMasterNodeAction { private final DataFrameTransformsConfigManager transformsConfigManager; private final DataFrameAuditor auditor; + private final Client client; @Inject public TransportDeleteDataFrameTransformAction(TransportService transportService, ActionFilters actionFilters, ThreadPool threadPool, ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, - DataFrameTransformsConfigManager transformsConfigManager, DataFrameAuditor auditor) { + DataFrameTransformsConfigManager transformsConfigManager, DataFrameAuditor auditor, + Client client) { super(DeleteDataFrameTransformAction.NAME, transportService, clusterService, threadPool, actionFilters, Request::new, indexNameExpressionResolver); this.transformsConfigManager = transformsConfigManager; this.auditor = auditor; + this.client = client; } @Override @@ -54,26 +62,36 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { return new AcknowledgedResponse(in); } - @Override - protected AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected void masterOperation(Task task, Request request, ClusterState state, - ActionListener listener) throws Exception { - PersistentTasksCustomMetaData pTasksMeta = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); - if (pTasksMeta != null && pTasksMeta.getTask(request.getId()) != null) { + ActionListener listener) { + final PersistentTasksCustomMetaData pTasksMeta = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); + if (pTasksMeta != null && pTasksMeta.getTask(request.getId()) != null && request.isForce() == false) { listener.onFailure(new ElasticsearchStatusException("Cannot delete data frame [" + request.getId() + "] as the task is running. 
Stop the task first", RestStatus.CONFLICT)); } else { - // Task is not running, delete the configuration document - transformsConfigManager.deleteTransform(request.getId(), ActionListener.wrap( - r -> { - auditor.info(request.getId(), "Deleted data frame transform."); - listener.onResponse(new AcknowledgedResponse(r)); - }, - listener::onFailure)); + ActionListener stopTransformActionListener = ActionListener.wrap( + stopResponse -> transformsConfigManager.deleteTransform(request.getId(), + ActionListener.wrap( + r -> { + auditor.info(request.getId(), "Deleted data frame transform."); + listener.onResponse(new AcknowledgedResponse(r)); + }, + listener::onFailure)), + listener::onFailure + ); + + if (pTasksMeta != null && pTasksMeta.getTask(request.getId()) != null) { + executeAsyncWithOrigin(client, + DATA_FRAME_ORIGIN, + StopDataFrameTransformAction.INSTANCE, + new StopDataFrameTransformAction.Request(request.getId(), true, true, null, true), + ActionListener.wrap( + r -> stopTransformActionListener.onResponse(null), + stopTransformActionListener::onFailure)); + } else { + stopTransformActionListener.onResponse(null); + } } } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java index fa35719be8ae1..ddbcaad51665c 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java @@ -23,12 +23,10 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; -import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -49,20 +47,17 @@ import org.elasticsearch.xpack.core.security.support.Exceptions; import org.elasticsearch.xpack.dataframe.notifications.DataFrameAuditor; import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; +import org.elasticsearch.xpack.dataframe.transforms.SourceDestValidator; import org.elasticsearch.xpack.dataframe.transforms.pivot.Pivot; import java.io.IOException; import java.time.Instant; import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.stream.Collectors; -public class TransportPutDataFrameTransformAction - extends TransportMasterNodeAction { +public class TransportPutDataFrameTransformAction extends TransportMasterNodeAction { private final XPackLicenseState licenseState; private final Client client; @@ -97,13 +92,7 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { } @Override - protected AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - - @Override - protected void masterOperation(Task task, Request request, ClusterState clusterState, ActionListener 
listener) - throws Exception { + protected void masterOperation(Task task, Request request, ClusterState clusterState, ActionListener listener) { if (!licenseState.isDataFrameAllowed()) { listener.onFailure(LicenseUtils.newComplianceException(XPackField.DATA_FRAME)); @@ -129,58 +118,19 @@ protected void masterOperation(Task task, Request request, ClusterState clusterS DataFrameMessages.getMessage(DataFrameMessages.REST_PUT_DATA_FRAME_TRANSFORM_EXISTS, transformId))); return; } - final String destIndex = config.getDestination().getIndex(); - Set concreteSourceIndexNames = new HashSet<>(); - for(String src : config.getSource().getIndex()) { - String[] concreteNames = indexNameExpressionResolver.concreteIndexNames(clusterState, IndicesOptions.lenientExpandOpen(), src); - if (concreteNames.length == 0) { - listener.onFailure(new ElasticsearchStatusException( - DataFrameMessages.getMessage(DataFrameMessages.REST_PUT_DATA_FRAME_SOURCE_INDEX_MISSING, src), - RestStatus.BAD_REQUEST)); - return; - } - if (Regex.simpleMatch(src, destIndex)) { - listener.onFailure(new ElasticsearchStatusException( - DataFrameMessages.getMessage(DataFrameMessages.REST_PUT_DATA_FRAME_DEST_IN_SOURCE, destIndex, src), - RestStatus.BAD_REQUEST - )); - return; - } - concreteSourceIndexNames.addAll(Arrays.asList(concreteNames)); - } - - if (concreteSourceIndexNames.contains(destIndex)) { - listener.onFailure(new ElasticsearchStatusException( - DataFrameMessages.getMessage(DataFrameMessages.REST_PUT_DATA_FRAME_DEST_IN_SOURCE, - destIndex, - Strings.arrayToCommaDelimitedString(config.getSource().getIndex())), - RestStatus.BAD_REQUEST - )); - return; - } - - final String[] concreteDest = - indexNameExpressionResolver.concreteIndexNames(clusterState, IndicesOptions.lenientExpandOpen(), destIndex); - - if (concreteDest.length > 1) { - listener.onFailure(new ElasticsearchStatusException( - DataFrameMessages.getMessage(DataFrameMessages.REST_PUT_DATA_FRAME_DEST_SINGLE_INDEX, destIndex), - RestStatus.BAD_REQUEST - )); - return; - } - if (concreteDest.length > 0 && concreteSourceIndexNames.contains(concreteDest[0])) { - listener.onFailure(new ElasticsearchStatusException( - DataFrameMessages.getMessage(DataFrameMessages.REST_PUT_DATA_FRAME_DEST_IN_SOURCE, - concreteDest[0], - Strings.arrayToCommaDelimitedString(concreteSourceIndexNames.toArray(new String[0]))), - RestStatus.BAD_REQUEST - )); + try { + SourceDestValidator.validate(config, clusterState, indexNameExpressionResolver, request.isDeferValidation()); + } catch (ElasticsearchStatusException ex) { + listener.onFailure(ex); return; } // Early check to verify that the user can create the destination index and can read from the source - if (licenseState.isAuthAllowed()) { + if (licenseState.isAuthAllowed() && request.isDeferValidation() == false) { + final String destIndex = config.getDestination().getIndex(); + final String[] concreteDest = indexNameExpressionResolver.concreteIndexNames(clusterState, + IndicesOptions.lenientExpandOpen(), + config.getDestination().getIndex()); final String username = securityContext.getUser().principal(); List srcPrivileges = new ArrayList<>(2); srcPrivileges.add("read"); @@ -211,12 +161,12 @@ protected void masterOperation(Task task, Request request, ClusterState clusterS privRequest.clusterPrivileges(Strings.EMPTY_ARRAY); privRequest.indexPrivileges(sourceIndexPrivileges, destIndexPrivileges); ActionListener privResponseListener = ActionListener.wrap( - r -> handlePrivsResponse(username, config, r, listener), + r -> 
handlePrivsResponse(username, request, r, listener), listener::onFailure); client.execute(HasPrivilegesAction.INSTANCE, privRequest, privResponseListener); } else { // No security enabled, just create the transform - putDataFrame(config, listener); + putDataFrame(request, listener); } } @@ -226,11 +176,11 @@ protected ClusterBlockException checkBlock(PutDataFrameTransformAction.Request r } private void handlePrivsResponse(String username, - DataFrameTransformConfig config, + Request request, HasPrivilegesResponse privilegesResponse, - ActionListener listener) throws IOException { + ActionListener listener) { if (privilegesResponse.isCompleteMatch()) { - putDataFrame(config, listener); + putDataFrame(request, listener); } else { List indices = privilegesResponse.getIndexPrivileges() .stream() @@ -239,18 +189,18 @@ private void handlePrivsResponse(String username, listener.onFailure(Exceptions.authorizationError( "Cannot create data frame transform [{}] because user {} lacks all the required permissions for indices: {}", - config.getId(), + request.getConfig().getId(), username, indices)); } } - private void putDataFrame(DataFrameTransformConfig config, ActionListener listener) { + private void putDataFrame(Request request, ActionListener listener) { + final DataFrameTransformConfig config = request.getConfig(); final Pivot pivot = new Pivot(config.getPivotConfig()); - - // <5> Return the listener, or clean up destination index on failure. + // <3> Return to the listener ActionListener putTransformConfigurationListener = ActionListener.wrap( putTransformConfigurationResult -> { auditor.info(config.getId(), "Created data frame transform."); @@ -259,15 +209,27 @@ private void putDataFrame(DataFrameTransformConfig config, ActionListener Put our transform + // <2> Put our transform ActionListener pivotValidationListener = ActionListener.wrap( validationResult -> dataFrameTransformsConfigManager.putTransformConfiguration(config, putTransformConfigurationListener), validationException -> listener.onFailure( - new RuntimeException(DataFrameMessages.REST_PUT_DATA_FRAME_FAILED_TO_VALIDATE_DATA_FRAME_CONFIGURATION, - validationException)) + new RuntimeException(DataFrameMessages.REST_PUT_DATA_FRAME_FAILED_TO_VALIDATE_DATA_FRAME_CONFIGURATION, + validationException)) ); - // <1> Validate our pivot - pivot.validate(client, config.getSource(), pivotValidationListener); + try { + pivot.validateConfig(); + } catch (Exception e) { + listener.onFailure( + new RuntimeException(DataFrameMessages.REST_PUT_DATA_FRAME_FAILED_TO_VALIDATE_DATA_FRAME_CONFIGURATION, + e)); + return; + } + + if (request.isDeferValidation()) { + pivotValidationListener.onResponse(true); + } else { + pivot.validateQuery(client, config.getSource(), pivotValidationListener); + } } } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java index 4b2a11a7d0258..c0f0bbc942e5a 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java @@ -45,6 +45,7 @@ import org.elasticsearch.xpack.dataframe.notifications.DataFrameAuditor; import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; import 
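The PUT path above splits validation in two: pivot.validateConfig() always runs locally, while the search-based pivot.validateQuery() and the privilege check are skipped when the new defer_validation flag is set on the request. Condensed (the try/catch around validateConfig is elided):

    // tail of putDataFrame(), condensed from the hunk above
    pivot.validateConfig();                            // structural checks, always
    if (request.isDeferValidation()) {
        pivotValidationListener.onResponse(true);      // skip the query validation
    } else {
        pivot.validateQuery(client, config.getSource(), pivotValidationListener);
    }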
org.elasticsearch.xpack.dataframe.persistence.DataframeIndex; +import org.elasticsearch.xpack.dataframe.transforms.SourceDestValidator; import org.elasticsearch.xpack.dataframe.transforms.pivot.Pivot; import java.io.IOException; @@ -86,11 +87,6 @@ protected String executor() { return ThreadPool.Names.SAME; } - @Override - protected StartDataFrameTransformAction.Response newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected StartDataFrameTransformAction.Response read(StreamInput in) throws IOException { return new StartDataFrameTransformAction.Response(in); @@ -185,8 +181,10 @@ protected void masterOperation(Task ignoredTask, StartDataFrameTransformAction.R )); return; } + // Validate source and destination indices + SourceDestValidator.validate(config, clusterService.state(), indexNameExpressionResolver, false); - transformTaskHolder.set(createDataFrameTransform(config.getId(), config.getVersion())); + transformTaskHolder.set(createDataFrameTransform(config.getId(), config.getVersion(), config.getFrequency())); final String destinationIndex = config.getDestination().getIndex(); String[] dest = indexNameExpressionResolver.concreteIndexNames(state, IndicesOptions.lenientExpandOpen(), @@ -255,8 +253,8 @@ protected ClusterBlockException checkBlock(StartDataFrameTransformAction.Request return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); } - private static DataFrameTransform createDataFrameTransform(String transformId, Version transformVersion) { - return new DataFrameTransform(transformId, transformVersion); + private static DataFrameTransform createDataFrameTransform(String transformId, Version transformVersion, TimeValue frequency) { + return new DataFrameTransform(transformId, transformVersion, frequency); } @SuppressWarnings("unchecked") diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java index 35a9d19658345..c2ddcdb9bac2b 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java @@ -21,23 +21,29 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.MasterNotDiscoveredException; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.action.util.PageParams; +import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; import org.elasticsearch.xpack.core.dataframe.action.StopDataFrameTransformAction; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformTask; +import 
java.util.ArrayList; import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Set; +import static org.elasticsearch.xpack.core.dataframe.DataFrameMessages.DATA_FRAME_CANNOT_STOP_FAILED_TRANSFORM; + public class TransportStopDataFrameTransformAction extends TransportTasksAction { @@ -63,6 +69,32 @@ public TransportStopDataFrameTransformAction(TransportService transportService, this.client = client; } + static void validateTaskState(ClusterState state, List transformIds, boolean isForce) { + PersistentTasksCustomMetaData tasks = state.metaData().custom(PersistentTasksCustomMetaData.TYPE); + if (isForce == false && tasks != null) { + List failedTasks = new ArrayList<>(); + List failedReasons = new ArrayList<>(); + for (String transformId : transformIds) { + PersistentTasksCustomMetaData.PersistentTask dfTask = tasks.getTask(transformId); + if (dfTask != null + && dfTask.getState() instanceof DataFrameTransformState + && ((DataFrameTransformState) dfTask.getState()).getTaskState() == DataFrameTransformTaskState.FAILED) { + failedTasks.add(transformId); + failedReasons.add(((DataFrameTransformState) dfTask.getState()).getReason()); + } + } + if (failedTasks.isEmpty() == false) { + String msg = failedTasks.size() == 1 ? + DataFrameMessages.getMessage(DATA_FRAME_CANNOT_STOP_FAILED_TRANSFORM, + failedTasks.get(0), + failedReasons.get(0)) : + "Unable to stop data frame transforms. The following transforms are in a failed state " + + failedTasks + " with reasons " + failedReasons + ". Use force stop to stop the data frame transforms."; + throw new ElasticsearchStatusException(msg, RestStatus.CONFLICT); + } + } + } + @Override protected void doExecute(Task task, StopDataFrameTransformAction.Request request, ActionListener listener) { @@ -88,8 +120,9 @@ protected void doExecute(Task task, StopDataFrameTransformAction.Request request new PageParams(0, 10_000), request.isAllowNoMatch(), ActionListener.wrap(hitsAndIds -> { + validateTaskState(state, hitsAndIds.v2(), request.isForce()); request.setExpandedIds(new HashSet<>(hitsAndIds.v2())); - request.setNodes(DataFrameNodes.dataFrameTaskNodes(hitsAndIds.v2(), clusterService.state())); + request.setNodes(DataFrameNodes.dataFrameTaskNodes(hitsAndIds.v2(), state)); super.doExecute(task, request, finalListener); }, listener::onFailure @@ -108,11 +141,14 @@ protected void taskOperation(StopDataFrameTransformAction.Request request, DataF } if (ids.contains(transformTask.getTransformId())) { + // This should not occur as we check that none of the tasks are in a failed state earlier + // Keep this check in here for insurance. if (transformTask.getState().getTaskState() == DataFrameTransformTaskState.FAILED && request.isForce() == false) { listener.onFailure( - new ElasticsearchStatusException("Unable to stop data frame transform [" + request.getId() - + "] as it is in a failed state with reason: [" + transformTask.getState().getReason() + - "]. 
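validateTaskState() above moves the failed-state check onto the coordinating node, before the stop request fans out to the task nodes, so a non-forced stop of a failed transform now fails fast with 409 CONFLICT instead of stopping some tasks and rejecting others. What a caller sees, assuming the low-level REST client and a transform already in FAILED (endpoint as used elsewhere in these tests):

    // a plain stop is rejected while the task state is FAILED
    Request stop = new Request("POST", "_data_frame/transforms/failure_pivot_1/_stop");
    try {
        client().performRequest(stop);
    } catch (ResponseException e) {
        assert e.getResponse().getStatusLine().getStatusCode() == 409;  // CONFLICT
    }
    stop.addParameter("force", "true");  // a force stop bypasses validateTaskState
    client().performRequest(stop);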
Use force stop to stop the data frame transform.", + new ElasticsearchStatusException( + DataFrameMessages.getMessage(DATA_FRAME_CANNOT_STOP_FAILED_TRANSFORM, + request.getId(), + transformTask.getState().getReason()), RestStatus.CONFLICT)); return; } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointService.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointService.java index d028d3248c8f6..831655163ab25 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointService.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointService.java @@ -14,9 +14,8 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.stats.ShardStats; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.Client; -import org.elasticsearch.common.Strings; -import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpoint; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointStats; @@ -94,7 +93,8 @@ public void getCheckpoint(DataFrameTransformConfig transformConfig, long checkpo // 1st get index to see the indexes the user has access to GetIndexRequest getIndexRequest = new GetIndexRequest() .indices(transformConfig.getSource().getIndex()) - .features(new GetIndexRequest.Feature[0]); + .features(new GetIndexRequest.Feature[0]) + .indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN); ClientHelper.executeWithHeadersAsync(transformConfig.getHeaders(), ClientHelper.DATA_FRAME_ORIGIN, client, GetIndexAction.INSTANCE, getIndexRequest, ActionListener.wrap(getIndexResponse -> { @@ -105,7 +105,8 @@ public void getCheckpoint(DataFrameTransformConfig transformConfig, long checkpo IndicesStatsAction.INSTANCE, new IndicesStatsRequest() .indices(transformConfig.getSource().getIndex()) - .clear(), + .clear() + .indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN), ActionListener.wrap( response -> { if (response.getFailedShards() != 0) { @@ -113,21 +114,18 @@ public void getCheckpoint(DataFrameTransformConfig transformConfig, long checkpo new CheckpointException("Source has [" + response.getFailedShards() + "] failed shards")); return; } - try { - Map checkpointsByIndex = extractIndexCheckPoints(response.getShards(), userIndices); - listener.onResponse(new DataFrameTransformCheckpoint(transformConfig.getId(), - timestamp, - checkpoint, - checkpointsByIndex, - timeUpperBound)); - } catch (CheckpointException checkpointException) { - listener.onFailure(checkpointException); - } + + Map checkpointsByIndex = extractIndexCheckPoints(response.getShards(), userIndices); + listener.onResponse(new DataFrameTransformCheckpoint(transformConfig.getId(), + timestamp, + checkpoint, + checkpointsByIndex, + timeUpperBound)); }, - listener::onFailure + e-> listener.onFailure(new CheckpointException("Failed to create checkpoint", e)) )); }, - listener::onFailure + e -> listener.onFailure(new CheckpointException("Failed to create checkpoint", e)) )); } @@ -223,38 +221,44 @@ static Map extractIndexCheckPoints(ShardStats[] shards, Set checkpoints = 
checkpointsByIndex.get(indexName); if (checkpoints.containsKey(shard.getShardRouting().getId())) { // there is already a checkpoint entry for this index/shard combination, check if they match - if (checkpoints.get(shard.getShardRouting().getId()) != shard.getSeqNoStats().getGlobalCheckpoint()) { + if (checkpoints.get(shard.getShardRouting().getId()) != globalCheckpoint) { throw new CheckpointException("Global checkpoints mismatch for index [" + indexName + "] between shards of id [" + shard.getShardRouting().getId() + "]"); } } else { // 1st time we see this shard for this index, add the entry for the shard - checkpoints.put(shard.getShardRouting().getId(), shard.getSeqNoStats().getGlobalCheckpoint()); + checkpoints.put(shard.getShardRouting().getId(), globalCheckpoint); } } else { // 1st time we see this index, create an entry for the index and add the shard checkpoint checkpointsByIndex.put(indexName, new TreeMap<>()); - checkpointsByIndex.get(indexName).put(shard.getShardRouting().getId(), shard.getSeqNoStats().getGlobalCheckpoint()); + checkpointsByIndex.get(indexName).put(shard.getShardRouting().getId(), globalCheckpoint); } } } + // checkpoint extraction is done in 2 steps: + // 1. GetIndexRequest to retrieve the indices the user has access to + // 2. IndicesStatsRequest to retrieve stats about indices + // between 1 and 2 indices could get deleted or created + if (logger.isDebugEnabled()) { + Set userIndicesClone = new HashSet<>(userIndices); + + userIndicesClone.removeAll(checkpointsByIndex.keySet()); + if (userIndicesClone.isEmpty() == false) { + logger.debug("Original set of user indices contained more indexes [{}]", userIndicesClone); + } + } + // create the final structure Map checkpointsByIndexReduced = new TreeMap<>(); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestDeleteDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestDeleteDataFrameTransformAction.java index 125e61b5021e4..6b5b91a6d1cee 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestDeleteDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestDeleteDataFrameTransformAction.java @@ -15,8 +15,6 @@ import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.action.DeleteDataFrameTransformAction; -import java.io.IOException; - public class RestDeleteDataFrameTransformAction extends BaseRestHandler { public RestDeleteDataFrameTransformAction(Settings settings, RestController controller) { @@ -25,13 +23,14 @@ public RestDeleteDataFrameTransformAction(Settings settings, RestController cont } @Override - protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { if (restRequest.hasContent()) { throw new IllegalArgumentException("delete data frame transforms requests can not have a request body"); } String id = restRequest.param(DataFrameField.ID.getPreferredName()); - DeleteDataFrameTransformAction.Request request = new DeleteDataFrameTransformAction.Request(id); + boolean force = restRequest.paramAsBoolean(DataFrameField.FORCE.getPreferredName(), false); + DeleteDataFrameTransformAction.Request request = new DeleteDataFrameTransformAction.Request(id, force); return channel -> 
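The checkpoint service now builds a checkpoint in two lenient steps, GetIndex to learn which source indices the user can see and IndicesStats to read the global checkpoints, and the comment above spells out the race this allows: indices can be created or deleted between the two calls, which is why both requests use LENIENT_EXPAND_OPEN and why the debug block only logs, rather than fails, when the stats response covers fewer indices. The request setup, as in the hunks above, with a placeholder index pattern:

    // both steps tolerate missing or closed indices; "src-*" is a placeholder
    GetIndexRequest getIndex = new GetIndexRequest()
        .indices("src-*")
        .features(new GetIndexRequest.Feature[0])             // index names only
        .indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN);

    IndicesStatsRequest stats = new IndicesStatsRequest()
        .indices("src-*")
        .clear()                                              // drop the optional stats flags;
        .indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN);  // shard seq_no stats still arrive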
client.execute(DeleteDataFrameTransformAction.INSTANCE, request, new RestToXContentListener<>(channel)); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestPutDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestPutDataFrameTransformAction.java index 2874894d879ab..103200244561d 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestPutDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestPutDataFrameTransformAction.java @@ -35,7 +35,8 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient String id = restRequest.param(DataFrameField.ID.getPreferredName()); XContentParser parser = restRequest.contentParser(); - PutDataFrameTransformAction.Request request = PutDataFrameTransformAction.Request.fromXContent(parser, id); + boolean deferValidation = restRequest.paramAsBoolean(DataFrameField.DEFER_VALIDATION.getPreferredName(), false); + PutDataFrameTransformAction.Request request = PutDataFrameTransformAction.Request.fromXContent(parser, id, deferValidation); return channel -> client.execute(PutDataFrameTransformAction.INSTANCE, request, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java index 1516b415d2a7d..20e8218c274e6 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.BoolQueryBuilder; @@ -23,6 +24,7 @@ import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerPosition; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpoint; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; @@ -47,7 +49,23 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -public abstract class DataFrameIndexer extends AsyncTwoPhaseIndexer, DataFrameIndexerTransformStats> { +public abstract class DataFrameIndexer extends AsyncTwoPhaseIndexer { + + /** + * RunState is an internal (non-persisted) state that controls the internal logic + * which query filters to run and which index requests to send + */ + private enum RunState { + // do a complete query/index, this is used for batch data frames and for bootstraping (1st run) + FULL_RUN, + + // Partial run modes in 2 stages: + // identify buckets that have changed + PARTIAL_RUN_IDENTIFY_CHANGES, + + // recalculate buckets based on the update list + 
PARTIAL_RUN_APPLY_CHANGES + } public static final int MINIMUM_PAGE_SIZE = 10; public static final String COMPOSITE_AGGREGATION_NAME = "_data_frame"; @@ -61,24 +79,34 @@ public abstract class DataFrameIndexer extends AsyncTwoPhaseIndexer> changedBuckets; + private volatile Map changedBucketsAfterKey; public DataFrameIndexer(Executor executor, DataFrameAuditor auditor, DataFrameTransformConfig transformConfig, Map fieldMappings, AtomicReference initialState, - Map initialPosition, + DataFrameIndexerPosition initialPosition, DataFrameIndexerTransformStats jobStats, DataFrameTransformProgress transformProgress, - DataFrameTransformCheckpoint inProgressOrLastCheckpoint) { + DataFrameTransformCheckpoint lastCheckpoint, + DataFrameTransformCheckpoint nextCheckpoint) { super(executor, initialState, initialPosition, jobStats); this.auditor = Objects.requireNonNull(auditor); this.transformConfig = ExceptionsHelper.requireNonNull(transformConfig, "transformConfig"); this.fieldMappings = ExceptionsHelper.requireNonNull(fieldMappings, "fieldMappings"); this.progress = transformProgress; - this.inProgressOrLastCheckpoint = inProgressOrLastCheckpoint; + this.lastCheckpoint = lastCheckpoint; + this.nextCheckpoint = nextCheckpoint; + // give runState a default + this.runState = RunState.FULL_RUN; } protected abstract void failIndexer(String message); @@ -117,6 +145,8 @@ protected void onStart(long now, ActionListener listener) { if (pageSize == 0) { pageSize = pivot.getInitialPageSize(); } + + runState = determineRunStateAtStart(); listener.onResponse(null); } catch (Exception e) { listener.onFailure(e); @@ -136,24 +166,95 @@ protected void onFinish(ActionListener listener) { } @Override - protected IterationResult> doProcess(SearchResponse searchResponse) { + protected IterationResult doProcess(SearchResponse searchResponse) { final CompositeAggregation agg = searchResponse.getAggregations().get(COMPOSITE_AGGREGATION_NAME); + switch (runState) { + case FULL_RUN: + return processBuckets(agg); + case PARTIAL_RUN_APPLY_CHANGES: + return processPartialBucketUpdates(agg); + case PARTIAL_RUN_IDENTIFY_CHANGES: + return processChangedBuckets(agg); + + default: + // Any other state is a bug, should not happen + logger.warn("Encountered unexpected run state [" + runState + "]"); + throw new IllegalStateException("DataFrame indexer job encountered an illegal state [" + runState + "]"); + } + } + + private IterationResult processBuckets(final CompositeAggregation agg) { // we reached the end if (agg.getBuckets().isEmpty()) { return new IterationResult<>(Collections.emptyList(), null, true); } long docsBeforeProcess = getStats().getNumDocuments(); - IterationResult> result = new IterationResult<>(processBucketsToIndexRequests(agg).collect(Collectors.toList()), - agg.afterKey(), - agg.getBuckets().isEmpty()); + + DataFrameIndexerPosition oldPosition = getPosition(); + DataFrameIndexerPosition newPosition = new DataFrameIndexerPosition(agg.afterKey(), + oldPosition != null ? 
getPosition().getBucketsPosition() : null); + + IterationResult result = new IterationResult<>( + processBucketsToIndexRequests(agg).collect(Collectors.toList()), + newPosition, + agg.getBuckets().isEmpty()); + if (progress != null) { progress.docsProcessed(getStats().getNumDocuments() - docsBeforeProcess); } + return result; } + private IterationResult processPartialBucketUpdates(final CompositeAggregation agg) { + // we reached the end + if (agg.getBuckets().isEmpty()) { + // cleanup changed Buckets + changedBuckets = null; + + // reset the runState to fetch changed buckets + runState = RunState.PARTIAL_RUN_IDENTIFY_CHANGES; + // advance the cursor for changed bucket detection + return new IterationResult<>(Collections.emptyList(), + new DataFrameIndexerPosition(null, changedBucketsAfterKey), false); + } + + return processBuckets(agg); + } + + + private IterationResult processChangedBuckets(final CompositeAggregation agg) { + // initialize the map of changed buckets, the map might be empty if source do not require/implement + // changed bucket detection + changedBuckets = pivot.initialIncrementalBucketUpdateMap(); + + // reached the end? + if (agg.getBuckets().isEmpty()) { + // reset everything and return the end marker + changedBuckets = null; + changedBucketsAfterKey = null; + return new IterationResult<>(Collections.emptyList(), null, true); + } + // else + + // collect all buckets that require the update + agg.getBuckets().stream().forEach(bucket -> { + bucket.getKey().forEach((k, v) -> { + changedBuckets.get(k).add(v.toString()); + }); + }); + + // remember the after key but do not store it in the state yet (in the failure we need to retrieve it again) + changedBucketsAfterKey = agg.afterKey(); + + // reset the runState to fetch the partial updates next + runState = RunState.PARTIAL_RUN_APPLY_CHANGES; + + return new IterationResult<>(Collections.emptyList(), getPosition(), false); + } + /* * Parses the result and creates a stream of indexable documents * @@ -197,43 +298,129 @@ private Stream processBucketsToIndexRequests(CompositeAggregation } protected QueryBuilder buildFilterQuery() { + assert nextCheckpoint != null; + QueryBuilder pivotQueryBuilder = getConfig().getSource().getQueryConfig().getQuery(); DataFrameTransformConfig config = getConfig(); - if (config.getSyncConfig() != null) { - if (inProgressOrLastCheckpoint == null) { - throw new RuntimeException("in progress checkpoint not found"); - } + if (this.isContinuous()) { BoolQueryBuilder filteredQuery = new BoolQueryBuilder() - .filter(pivotQueryBuilder) - .filter(config.getSyncConfig().getRangeQuery(inProgressOrLastCheckpoint)); + .filter(pivotQueryBuilder); - if (changedBuckets != null && changedBuckets.isEmpty() == false) { - QueryBuilder pivotFilter = pivot.filterBuckets(changedBuckets); - if (pivotFilter != null) { - filteredQuery.filter(pivotFilter); - } + if (lastCheckpoint != null) { + filteredQuery.filter(config.getSyncConfig().getRangeQuery(lastCheckpoint, nextCheckpoint)); + } else { + filteredQuery.filter(config.getSyncConfig().getRangeQuery(nextCheckpoint)); } - - logger.trace("running filtered query: {}", filteredQuery); return filteredQuery; - } else { - return pivotQueryBuilder; } + + return pivotQueryBuilder; } @Override protected SearchRequest buildSearchRequest() { - SearchRequest searchRequest = new SearchRequest(getConfig().getSource().getIndex()); + assert nextCheckpoint != null; + + SearchRequest searchRequest = new SearchRequest(getConfig().getSource().getIndex()) + .allowPartialSearchResults(false) 
+ .indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN); SearchSourceBuilder sourceBuilder = new SearchSourceBuilder() - .aggregation(pivot.buildAggregation(getPosition(), pageSize)) - .size(0) - .query(buildFilterQuery()); + .size(0); + + switch (runState) { + case FULL_RUN: + buildFullRunQuery(sourceBuilder); + break; + case PARTIAL_RUN_IDENTIFY_CHANGES: + buildChangedBucketsQuery(sourceBuilder); + break; + case PARTIAL_RUN_APPLY_CHANGES: + buildPartialUpdateQuery(sourceBuilder); + break; + default: + // Any other state is a bug, should not happen + logger.warn("Encountered unexpected run state [" + runState + "]"); + throw new IllegalStateException("DataFrame indexer job encountered an illegal state [" + runState + "]"); + } + searchRequest.source(sourceBuilder); return searchRequest; } + private SearchSourceBuilder buildFullRunQuery(SearchSourceBuilder sourceBuilder) { + DataFrameIndexerPosition position = getPosition(); + + sourceBuilder.aggregation(pivot.buildAggregation(position != null ? position.getIndexerPosition() : null, pageSize)); + DataFrameTransformConfig config = getConfig(); + + QueryBuilder pivotQueryBuilder = config.getSource().getQueryConfig().getQuery(); + if (isContinuous()) { + BoolQueryBuilder filteredQuery = new BoolQueryBuilder() + .filter(pivotQueryBuilder) + .filter(config.getSyncConfig() + .getRangeQuery(nextCheckpoint)); + sourceBuilder.query(filteredQuery); + } else { + sourceBuilder.query(pivotQueryBuilder); + } + + logger.trace("running full run query: {}", sourceBuilder); + + return sourceBuilder; + } + + private SearchSourceBuilder buildChangedBucketsQuery(SearchSourceBuilder sourceBuilder) { + assert isContinuous(); + + DataFrameIndexerPosition position = getPosition(); + + CompositeAggregationBuilder changesAgg = pivot.buildIncrementalBucketUpdateAggregation(pageSize); + changesAgg.aggregateAfter(position != null ? position.getBucketsPosition() : null); + sourceBuilder.aggregation(changesAgg); + + QueryBuilder pivotQueryBuilder = getConfig().getSource().getQueryConfig().getQuery(); + + DataFrameTransformConfig config = getConfig(); + BoolQueryBuilder filteredQuery = new BoolQueryBuilder(). + filter(pivotQueryBuilder). + filter(config.getSyncConfig().getRangeQuery(lastCheckpoint, nextCheckpoint)); + + sourceBuilder.query(filteredQuery); + + logger.trace("running changes query {}", sourceBuilder); + return sourceBuilder; + } + + private SearchSourceBuilder buildPartialUpdateQuery(SearchSourceBuilder sourceBuilder) { + assert isContinuous(); + + DataFrameIndexerPosition position = getPosition(); + + sourceBuilder.aggregation(pivot.buildAggregation(position != null ? position.getIndexerPosition() : null, pageSize)); + DataFrameTransformConfig config = getConfig(); + + QueryBuilder pivotQueryBuilder = config.getSource().getQueryConfig().getQuery(); + + BoolQueryBuilder filteredQuery = new BoolQueryBuilder() + .filter(pivotQueryBuilder) + .filter(config.getSyncConfig() + .getRangeQuery(nextCheckpoint)); + + if (changedBuckets != null && changedBuckets.isEmpty() == false) { + QueryBuilder pivotFilter = pivot.filterBuckets(changedBuckets); + if (pivotFilter != null) { + filteredQuery.filter(pivotFilter); + } + } + + sourceBuilder.query(filteredQuery); + logger.trace("running partial update query: {}", sourceBuilder); + + return sourceBuilder; + } + /** * Handle the circuit breaking case: A search consumed to much memory and got aborted. 
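The RunState machinery above replaces the old recursive getChangedBuckets/collectChangedBuckets round trip (removed just below): a continuous transform now alternates between one search that pages through the changed bucket keys and one that recomputes just those buckets, with both cursors carried in DataFrameIndexerPosition. A self-contained toy model of the transitions, mirroring the doProcess() branches above and determineRunStateAtStart() below (the real search and index work is stubbed out):

    // toy model for illustration; names mirror the diff, logic condensed
    class RunStateModel {
        enum RunState { FULL_RUN, PARTIAL_RUN_IDENTIFY_CHANGES, PARTIAL_RUN_APPLY_CHANGES }

        static RunState atStart(long nextCheckpoint, boolean continuous, boolean supportsChangeDetection) {
            if (nextCheckpoint == 1 || continuous == false) {
                return RunState.FULL_RUN;                      // first run, or a batch transform
            }
            if (supportsChangeDetection == false) {
                return RunState.FULL_RUN;                      // pivot cannot narrow to changed buckets
            }
            return RunState.PARTIAL_RUN_IDENTIFY_CHANGES;      // find what changed first
        }

        // transition after one page of composite-agg buckets has been processed
        static RunState afterPage(RunState current, boolean pageWasEmpty) {
            switch (current) {
                case PARTIAL_RUN_IDENTIFY_CHANGES:
                    // empty page: checkpoint finished; otherwise apply this page of keys
                    return pageWasEmpty ? current : RunState.PARTIAL_RUN_APPLY_CHANGES;
                case PARTIAL_RUN_APPLY_CHANGES:
                    // updates drained: go look for the next page of changed keys
                    return pageWasEmpty ? RunState.PARTIAL_RUN_IDENTIFY_CHANGES : current;
                default:
                    return current;                            // FULL_RUN never switches
            }
        }
    }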
* @@ -272,82 +459,19 @@ protected boolean handleCircuitBreakingException(Exception e) { return true; } - protected void getChangedBuckets(DataFrameTransformCheckpoint oldCheckpoint, - DataFrameTransformCheckpoint newCheckpoint, - ActionListener>> listener) { - - ActionListener>> wrappedListener = ActionListener.wrap( - r -> { - this.inProgressOrLastCheckpoint = newCheckpoint; - this.changedBuckets = r; - listener.onResponse(r); - }, - listener::onFailure - ); - // initialize the map of changed buckets, the map might be empty if source do not require/implement - // changed bucket detection - Map> keys = pivot.initialIncrementalBucketUpdateMap(); - if (keys.isEmpty()) { - logger.trace("This data frame does not implement changed bucket detection, returning"); - wrappedListener.onResponse(null); - return; + private RunState determineRunStateAtStart() { + // either 1st run or not a continuous data frame + if (nextCheckpoint.getCheckpoint() == 1 || isContinuous() == false) { + return RunState.FULL_RUN; } - SearchRequest searchRequest = new SearchRequest(getConfig().getSource().getIndex()); - SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); - - // we do not need the sub-aggs - CompositeAggregationBuilder changesAgg = pivot.buildIncrementalBucketUpdateAggregation(pageSize); - sourceBuilder.aggregation(changesAgg); - sourceBuilder.size(0); - - QueryBuilder pivotQueryBuilder = getConfig().getSource().getQueryConfig().getQuery(); - - DataFrameTransformConfig config = getConfig(); - if (config.getSyncConfig() != null) { - BoolQueryBuilder filteredQuery = new BoolQueryBuilder(). - filter(pivotQueryBuilder). - filter(config.getSyncConfig().getRangeQuery(oldCheckpoint, newCheckpoint)); - - logger.trace("Gathering changes using query {}", filteredQuery); - sourceBuilder.query(filteredQuery); - } else { - logger.trace("No sync configured"); - wrappedListener.onResponse(null); - return; + // if incremental update is not supported, do a full run + if (pivot.supportsIncrementalBucketUpdate() == false) { + return RunState.FULL_RUN; } - searchRequest.source(sourceBuilder); - searchRequest.allowPartialSearchResults(false); - - collectChangedBuckets(searchRequest, changesAgg, keys, ActionListener.wrap(wrappedListener::onResponse, e -> { - // fall back if bucket collection failed - logger.error("Failed to retrieve changed buckets, fall back to complete retrieval", e); - wrappedListener.onResponse(null); - })); - } - - void collectChangedBuckets(SearchRequest searchRequest, CompositeAggregationBuilder changesAgg, Map> keys, - ActionListener>> finalListener) { - - // re-using the existing search hook - doNextSearch(searchRequest, ActionListener.wrap(searchResponse -> { - final CompositeAggregation agg = searchResponse.getAggregations().get(COMPOSITE_AGGREGATION_NAME); - - agg.getBuckets().stream().forEach(bucket -> { - bucket.getKey().forEach((k, v) -> { - keys.get(k).add(v.toString()); - }); - }); - - if (agg.getBuckets().isEmpty()) { - finalListener.onResponse(keys); - } else { - // adjust the after key - changesAgg.aggregateAfter(agg.afterKey()); - collectChangedBuckets(searchRequest, changesAgg, keys, finalListener); - } - }, finalListener::onFailure)); + // continuous mode: we need to get the changed buckets first + return RunState.PARTIAL_RUN_IDENTIFY_CHANGES; } /** diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java 
b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java index ded3a0324e721..2af693d174ffa 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java @@ -8,6 +8,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.SetOnce; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; @@ -17,7 +18,9 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; @@ -32,6 +35,7 @@ import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStats; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; import org.elasticsearch.xpack.dataframe.DataFrame; @@ -59,13 +63,16 @@ public class DataFrameTransformPersistentTasksExecutor extends PersistentTasksEx private final SchedulerEngine schedulerEngine; private final ThreadPool threadPool; private final DataFrameAuditor auditor; + private volatile int numFailureRetries; public DataFrameTransformPersistentTasksExecutor(Client client, DataFrameTransformsConfigManager transformsConfigManager, DataFrameTransformsCheckpointService dataFrameTransformsCheckpointService, SchedulerEngine schedulerEngine, DataFrameAuditor auditor, - ThreadPool threadPool) { + ThreadPool threadPool, + ClusterService clusterService, + Settings settings) { super(DataFrameField.TASK_NAME, DataFrame.TASK_THREAD_POOL_NAME); this.client = client; this.transformsConfigManager = transformsConfigManager; @@ -73,6 +80,9 @@ public DataFrameTransformPersistentTasksExecutor(Client client, this.schedulerEngine = schedulerEngine; this.auditor = auditor; this.threadPool = threadPool; + this.numFailureRetries = DataFrameTransformTask.NUM_FAILURE_RETRIES_SETTING.get(settings); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(DataFrameTransformTask.NUM_FAILURE_RETRIES_SETTING, this::setNumFailureRetries); } @Override @@ -111,6 +121,12 @@ protected void nodeOperation(AllocatedPersistentTask task, @Nullable DataFrameTr final String transformId = params.getId(); final DataFrameTransformTask buildTask = (DataFrameTransformTask) task; final DataFrameTransformState transformPTaskState = (DataFrameTransformState) state; + // If the transform has failed, then the Persistent Task Service will + // try to restart it on a node restart. Exiting here leaves the + // transform in the failed state and it must be force closed. + if (transformPTaskState != null && transformPTaskState.getTaskState() == DataFrameTransformTaskState.FAILED) { + return; + }
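The bootstrap that follows wires steps <3> to <5> as ActionListeners declared in reverse order of execution, the idiom this codebase uses for chained async loads. A minimal, self-contained sketch of that shape, where Checkpoint and the load/start/fail helpers are hypothetical stand-ins rather than the PR's API:

import org.elasticsearch.action.ActionListener;

final class ListenerChainSketch {
    static final class Checkpoint { final long id; Checkpoint(long id) { this.id = id; } }

    void bootstrap() {
        // <5> declared first, runs last: receives the next checkpoint, then starts the task
        ActionListener<Checkpoint> onNextCheckpoint = ActionListener.wrap(
            next -> start(next),
            e -> fail("failed to load next checkpoint", e));
        // <4> declared second, runs second: receives the last checkpoint, then loads the next one
        ActionListener<Checkpoint> onLastCheckpoint = ActionListener.wrap(
            last -> load(last.id + 1, onNextCheckpoint),
            e -> fail("failed to load last checkpoint", e));
        // <3> kicks off the chain
        load(0, onLastCheckpoint);
    }

    void load(long id, ActionListener<Checkpoint> listener) { listener.onResponse(new Checkpoint(id)); }
    void start(Checkpoint cp) { /* initialize the indexer from cp and start */ }
    void fail(String reason, Exception e) { /* mark the persistent task failed */ }
}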
final DataFrameTransformTask.ClientDataFrameIndexerBuilder indexerBuilder = new DataFrameTransformTask.ClientDataFrameIndexerBuilder(transformId) @@ -119,14 +135,53 @@ protected void nodeOperation(AllocatedPersistentTask task, @Nullable DataFrameTr .setTransformsCheckpointService(dataFrameTransformsCheckpointService) .setTransformsConfigManager(transformsConfigManager); + final SetOnce stateHolder = new SetOnce<>(); + ActionListener startTaskListener = ActionListener.wrap( response -> logger.info("Successfully completed and scheduled task in node operation"), failure -> logger.error("Failed to start task ["+ transformId +"] in node operation", failure) ); - Long previousCheckpoint = transformPTaskState != null ? transformPTaskState.getCheckpoint() : null; + // <5> load next checkpoint + ActionListener getTransformNextCheckpointListener = ActionListener.wrap( + nextCheckpoint -> { + indexerBuilder.setNextCheckpoint(nextCheckpoint); + + final long lastCheckpoint = stateHolder.get().getCheckpoint(); + + logger.trace("[{}] Loaded next checkpoint, starting the task", transformId); + startTask(buildTask, indexerBuilder, lastCheckpoint, startTaskListener); + }, + error -> { + // TODO: do not use the same error message as for loading the last checkpoint + String msg = DataFrameMessages.getMessage(DataFrameMessages.FAILED_TO_LOAD_TRANSFORM_CHECKPOINT, transformId); + logger.error(msg, error); + markAsFailed(buildTask, msg); + } + ); + + // <4> load last checkpoint + ActionListener getTransformLastCheckpointListener = ActionListener.wrap( + lastCheckpoint -> { + indexerBuilder.setLastCheckpoint(lastCheckpoint); + + final long nextCheckpoint = stateHolder.get().getInProgressCheckpoint(); - // <4> Set the previous stats (if they exist), initialize the indexer, start the task (If it is STOPPED) + if (nextCheckpoint > 0) { + transformsConfigManager.getTransformCheckpoint(transformId, nextCheckpoint, getTransformNextCheckpointListener); + } else { + logger.trace("[{}] No next checkpoint found, starting the task", transformId); + startTask(buildTask, indexerBuilder, lastCheckpoint.getCheckpoint(), startTaskListener); + } + }, + error -> { + String msg = DataFrameMessages.getMessage(DataFrameMessages.FAILED_TO_LOAD_TRANSFORM_CHECKPOINT, transformId); + logger.error(msg, error); + markAsFailed(buildTask, msg); + } + ); + + // <3> Set the previous stats (if they exist), initialize the indexer, start the task (if it is STOPPED) // Since we don't create the task until `_start` is called, if we see that the task state is stopped, attempt to start // Schedule execution regardless ActionListener transformStatsActionListener = ActionListener.wrap( @@ -141,27 +196,26 @@ protected void nodeOperation(AllocatedPersistentTask task, @Nullable DataFrameTr stateAndStats.getTransformState(), stateAndStats.getTransformState().getPosition()); - final Long checkpoint = stateAndStats.getTransformState().getCheckpoint(); - startTask(buildTask, indexerBuilder, checkpoint, startTaskListener); + stateHolder.set(stateAndStats.getTransformState()); + final long lastCheckpoint = stateHolder.get().getCheckpoint(); + + if (lastCheckpoint == 0) { + logger.trace("[{}] No checkpoint found, starting the task", transformId); + startTask(buildTask, indexerBuilder, lastCheckpoint, startTaskListener); + } else { + logger.trace("[{}] Restore last checkpoint: [{}]", transformId, lastCheckpoint); +
transformsConfigManager.getTransformCheckpoint(transformId, lastCheckpoint, getTransformLastCheckpointListener); + } }, error -> { if (error instanceof ResourceNotFoundException == false) { - logger.warn("Unable to load previously persisted statistics for transform [" + params.getId() + "]", error); + String msg = DataFrameMessages.getMessage(DataFrameMessages.FAILED_TO_LOAD_TRANSFORM_STATE, transformId); + logger.error(msg, error); + markAsFailed(buildTask, msg); } - startTask(buildTask, indexerBuilder, previousCheckpoint, startTaskListener); - } - ); - // <3> set the in progress checkpoint for the indexer, get the in progress checkpoint - ActionListener getTransformCheckpointListener = ActionListener.wrap( - cp -> { - indexerBuilder.setInProgressOrLastCheckpoint(cp); - transformsConfigManager.getTransformStats(transformId, transformStatsActionListener); - }, - error -> { - String msg = DataFrameMessages.getMessage(DataFrameMessages.FAILED_TO_LOAD_TRANSFORM_CHECKPOINT, transformId); - logger.error(msg, error); - markAsFailed(buildTask, msg); + logger.trace("[{}] No stats found (new transform), starting the task", transformId); + startTask(buildTask, indexerBuilder, null, startTaskListener); } ); @@ -169,17 +223,7 @@ ActionListener> getFieldMappingsListener = ActionListener.wrap( fieldMappings -> { indexerBuilder.setFieldMappings(fieldMappings); - - long inProgressCheckpoint = transformPTaskState == null ? 0L : - Math.max(transformPTaskState.getCheckpoint(), transformPTaskState.getInProgressCheckpoint()); - - logger.debug("Restore in progress or last checkpoint: {}", inProgressCheckpoint); - - if (inProgressCheckpoint == 0) { - getTransformCheckpointListener.onResponse(DataFrameTransformCheckpoint.EMPTY); - } else { - transformsConfigManager.getTransformCheckpoint(transformId, inProgressCheckpoint, getTransformCheckpointListener); - } + transformsConfigManager.getTransformStats(transformId, transformStatsActionListener); }, error -> { String msg = DataFrameMessages.getMessage(DataFrameMessages.DATA_FRAME_UNABLE_TO_GATHER_FIELD_MAPPINGS, @@ -249,7 +293,11 @@ private void startTask(DataFrameTransformTask buildTask, Long previousCheckpoint, ActionListener listener) { buildTask.initializeIndexer(indexerBuilder); - buildTask.start(previousCheckpoint, listener); + buildTask.setNumFailureRetries(numFailureRetries).start(previousCheckpoint, listener); + } + + private void setNumFailureRetries(int numFailureRetries) { + this.numFailureRetries = numFailureRetries; } @Override diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java index ea67da79620c4..59db069a7dca5 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java @@ -20,6 +20,8 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.persistent.AllocatedPersistentTask; import
org.elasticsearch.persistent.PersistentTasksCustomMetaData; @@ -29,6 +31,7 @@ import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformTaskAction; import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformTaskAction.Response; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerPosition; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransform; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpoint; @@ -49,7 +52,6 @@ import java.util.Arrays; import java.util.Map; -import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; @@ -59,11 +61,19 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements SchedulerEngine.Listener { - // interval the scheduler sends an event - private static final int SCHEDULER_NEXT_MILLISECONDS = 10000; + // Default interval the scheduler sends an event if the config does not specify a frequency + private static final long SCHEDULER_NEXT_MILLISECONDS = 60000; private static final Logger logger = LogManager.getLogger(DataFrameTransformTask.class); - // TODO consider moving to dynamic cluster setting - private static final int MAX_CONTINUOUS_FAILURES = 10; + private static final int DEFAULT_FAILURE_RETRIES = 10; + private volatile int numFailureRetries = DEFAULT_FAILURE_RETRIES; + // How many times the transform task can retry on a non-critical failure + public static final Setting NUM_FAILURE_RETRIES_SETTING = Setting.intSetting( + "xpack.data_frame.num_transform_failure_retries", + DEFAULT_FAILURE_RETRIES, + 0, + 100, + Setting.Property.NodeScope, + Setting.Property.Dynamic); private static final IndexerState[] RUNNING_STATES = new IndexerState[]{IndexerState.STARTED, IndexerState.INDEXING}; public static final String SCHEDULE_NAME = DataFrameField.TASK_NAME + "/schedule"; @@ -71,7 +81,7 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S private final SchedulerEngine schedulerEngine; private final ThreadPool threadPool; private final DataFrameAuditor auditor; - private final Map initialPosition; + private final DataFrameIndexerPosition initialPosition; private final IndexerState initialIndexerState; private final SetOnce indexer = new SetOnce<>(); @@ -94,7 +104,7 @@ public DataFrameTransformTask(long id, String type, String action, TaskId parent DataFrameTransformTaskState initialTaskState = DataFrameTransformTaskState.STOPPED; String initialReason = null; long initialGeneration = 0; - Map initialPosition = null; + DataFrameIndexerPosition initialPosition = null; if (state != null) { initialTaskState = state.getTaskState(); initialReason = state.getReason(); @@ -252,6 +262,7 @@ public synchronized void stop() { } IndexerState state = getIndexer().stop(); + stateReason.set(null); if (state == IndexerState.STOPPED) { getIndexer().onStop(); getIndexer().doSaveState(state, getIndexer().getPosition(), () -> {}); @@ -318,16 +329,33 @@ void persistStateToClusterState(DataFrameTransformState state, }
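NUM_FAILURE_RETRIES_SETTING above is both NodeScope and Dynamic, so the retry budget can be changed on a live cluster. A minimal sketch of such an update, assuming only the standard cluster settings API (the helper class and method name are ours; values outside the declared 0..100 bounds are rejected by the setting itself):

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;

final class RetryBudgetSketch {
    static void setRetryBudget(Client client, int retries) {
        ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest()
            .persistentSettings(Settings.builder()
                .put("xpack.data_frame.num_transform_failure_retries", retries)
                .build());
        client.admin().cluster().updateSettings(request, ActionListener.wrap(
            response -> { /* acknowledged; the settings update consumer picks up the new value */ },
            e -> { /* a real caller should surface the failure */ }));
    }
}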
synchronized void markAsFailed(String reason, ActionListener listener) { - taskState.set(DataFrameTransformTaskState.FAILED); - stateReason.set(reason); auditor.error(transform.getId(), reason); + // We should not keep retrying. Either the task will be stopped, or started. + // If it is started again, it is registered again. + deregisterSchedulerJob(); + DataFrameTransformState newState = new DataFrameTransformState( + DataFrameTransformTaskState.FAILED, + initialIndexerState, + initialPosition, + currentCheckpoint.get(), + reason, + getIndexer() == null ? null : getIndexer().getProgress()); // Even though the indexer information is persisted to an index, we still need DataFrameTransformTaskState in the clusterstate // This keeps track of STARTED, FAILED, STOPPED // This is because a FAILED state can occur because we cannot read the config from the internal index, which would imply that // we could not read the previous state information from said index. - persistStateToClusterState(getState(), ActionListener.wrap( - r -> listener.onResponse(null), - listener::onFailure + persistStateToClusterState(newState, ActionListener.wrap( + r -> { + taskState.set(DataFrameTransformTaskState.FAILED); + stateReason.set(reason); + listener.onResponse(null); + }, + e -> { + logger.error("Failed to persist the failed task state to cluster state", e); + taskState.set(DataFrameTransformTaskState.FAILED); + stateReason.set(reason); + listener.onFailure(e); + } )); } @@ -346,6 +374,15 @@ public synchronized void onCancelled() { } } + public DataFrameTransformTask setNumFailureRetries(int numFailureRetries) { + this.numFailureRetries = numFailureRetries; + return this; + } + + public int getNumFailureRetries() { + return numFailureRetries; + } + private void registerWithSchedulerJob() { schedulerEngine.register(this); final SchedulerEngine.Job schedulerJob = new SchedulerEngine.Job(schedulerJobName(), next()); @@ -363,7 +400,8 @@ private String schedulerJobName() { private SchedulerEngine.Schedule next() { return (startTime, now) -> { - return now + SCHEDULER_NEXT_MILLISECONDS; + TimeValue frequency = transform.getFrequency(); + return now + (frequency == null ?
SCHEDULER_NEXT_MILLISECONDS : frequency.getMillis()); }; } @@ -381,9 +419,10 @@ static class ClientDataFrameIndexerBuilder { private DataFrameTransformConfig transformConfig; private DataFrameIndexerTransformStats initialStats; private IndexerState indexerState = IndexerState.STOPPED; - private Map initialPosition; + private DataFrameIndexerPosition initialPosition; private DataFrameTransformProgress progress; - private DataFrameTransformCheckpoint inProgressOrLastCheckpoint; + private DataFrameTransformCheckpoint lastCheckpoint; + private DataFrameTransformCheckpoint nextCheckpoint; ClientDataFrameIndexerBuilder(String transformId) { this.transformId = transformId; @@ -402,7 +441,8 @@ ClientDataFrameIndexer build(DataFrameTransformTask parentTask) { this.transformConfig, this.fieldMappings, this.progress, - this.inProgressOrLastCheckpoint, + this.lastCheckpoint, + this.nextCheckpoint, parentTask); } @@ -455,7 +495,7 @@ ClientDataFrameIndexerBuilder setIndexerState(IndexerState indexerState) { return this; } - ClientDataFrameIndexerBuilder setInitialPosition(Map initialPosition) { + ClientDataFrameIndexerBuilder setInitialPosition(DataFrameIndexerPosition initialPosition) { this.initialPosition = initialPosition; return this; } @@ -465,16 +505,21 @@ ClientDataFrameIndexerBuilder setProgress(DataFrameTransformProgress progress) { return this; } - ClientDataFrameIndexerBuilder setInProgressOrLastCheckpoint(DataFrameTransformCheckpoint inProgressOrLastCheckpoint) { - this.inProgressOrLastCheckpoint = inProgressOrLastCheckpoint; + ClientDataFrameIndexerBuilder setLastCheckpoint(DataFrameTransformCheckpoint lastCheckpoint) { + this.lastCheckpoint = lastCheckpoint; + return this; + } + + ClientDataFrameIndexerBuilder setNextCheckpoint(DataFrameTransformCheckpoint nextCheckpoint) { + this.nextCheckpoint = nextCheckpoint; return this; } } static class ClientDataFrameIndexer extends DataFrameIndexer { - private static final int ON_FINISH_AUDIT_FREQUENCY = 1000; - + private long logEvery = 1; + private long logCount = 0; private final Client client; private final DataFrameTransformsConfigManager transformsConfigManager; private final DataFrameTransformsCheckpointService transformsCheckpointService; @@ -489,14 +534,15 @@ static class ClientDataFrameIndexer extends DataFrameIndexer { DataFrameTransformsConfigManager transformsConfigManager, DataFrameTransformsCheckpointService transformsCheckpointService, AtomicReference initialState, - Map initialPosition, + DataFrameIndexerPosition initialPosition, Client client, DataFrameAuditor auditor, DataFrameIndexerTransformStats initialStats, DataFrameTransformConfig transformConfig, Map fieldMappings, DataFrameTransformProgress transformProgress, - DataFrameTransformCheckpoint inProgressOrLastCheckpoint, + DataFrameTransformCheckpoint lastCheckpoint, + DataFrameTransformCheckpoint nextCheckpoint, DataFrameTransformTask parentTask) { super(ExceptionsHelper.requireNonNull(parentTask, "parentTask") .threadPool @@ -508,7 +554,8 @@ static class ClientDataFrameIndexer extends DataFrameIndexer { initialPosition, initialStats == null ? 
new DataFrameIndexerTransformStats(transformId) : initialStats, transformProgress, - inProgressOrLastCheckpoint); + lastCheckpoint, + nextCheckpoint); this.transformId = ExceptionsHelper.requireNonNull(transformId, "transformId"); this.transformsConfigManager = ExceptionsHelper.requireNonNull(transformsConfigManager, "transformsConfigManager"); this.transformsCheckpointService = ExceptionsHelper.requireNonNull(transformsCheckpointService, @@ -524,9 +571,9 @@ protected void onStart(long now, ActionListener listener) { // Since multiple checkpoints can be executed in the task while it is running on the same node, we need to gather // the progress here, and not in the executor. if (initialRun()) { - ActionListener>> changedBucketsListener = ActionListener.wrap( - r -> { - TransformProgressGatherer.getInitialProgress(this.client, buildFilterQuery(), getConfig(), ActionListener.wrap( + createCheckpoint(ActionListener.wrap(cp -> { + nextCheckpoint = cp; + TransformProgressGatherer.getInitialProgress(this.client, buildFilterQuery(), getConfig(), ActionListener.wrap( newProgress -> { logger.trace("[{}] reset the progress from [{}] to [{}]", transformId, progress, newProgress); progress = newProgress; @@ -538,20 +585,6 @@ protected void onStart(long now, ActionListener listener) { super.onStart(now, listener); } )); - }, - listener::onFailure - ); - - createCheckpoint(ActionListener.wrap(cp -> { - DataFrameTransformCheckpoint oldCheckpoint = inProgressOrLastCheckpoint; - if (oldCheckpoint.isEmpty()) { - // this is the 1st run, accept the new in progress checkpoint and go on - inProgressOrLastCheckpoint = cp; - changedBucketsListener.onResponse(null); - } else { - logger.debug ("Getting changes from {} to {}", oldCheckpoint.getTimeUpperBound(), cp.getTimeUpperBound()); - getChangedBuckets(oldCheckpoint, cp, changedBucketsListener); - } }, listener::onFailure)); } else { super.onStart(now, listener); @@ -594,26 +627,36 @@ protected void doNextBulk(BulkRequest request, ActionListener next BulkAction.INSTANCE, request, ActionListener.wrap(bulkResponse -> { - if (bulkResponse.hasFailures() && auditBulkFailures) { + if (bulkResponse.hasFailures()) { int failureCount = 0; for(BulkItemResponse item : bulkResponse.getItems()) { if (item.isFailed()) { failureCount++; } + // TODO gather information on irrecoverable failures and update isIrrecoverableFailure } - auditor.warning(transformId, - "Experienced at least [" + - failureCount + - "] bulk index failures. See the logs of the node running the transform for details. " + - bulkResponse.buildFailureMessage()); - auditBulkFailures = false; + if (auditBulkFailures) { + auditor.warning(transformId, + "Experienced at least [" + + failureCount + + "] bulk index failures. See the logs of the node running the transform for details. " + + bulkResponse.buildFailureMessage()); + auditBulkFailures = false; + } + // This calls AsyncTwoPhaseIndexer#finishWithIndexingFailure + // It increments the indexing failure, and then calls the `onFailure` logic + nextPhase.onFailure( + new BulkIndexingException("Bulk index experienced failures. 
" + + "See the logs of the node running the transform for details.")); + } else { + auditBulkFailures = true; + nextPhase.onResponse(bulkResponse); } - nextPhase.onResponse(bulkResponse); }, nextPhase::onFailure)); } @Override - protected void doSaveState(IndexerState indexerState, Map position, Runnable next) { + protected void doSaveState(IndexerState indexerState, DataFrameIndexerPosition position, Runnable next) { if (indexerState.equals(IndexerState.ABORTING)) { // If we're aborting, just invoke `next` (which is likely an onFailure handler) next.run(); @@ -696,11 +739,15 @@ protected void onFailure(Exception exc) { @Override protected void onFinish(ActionListener listener) { try { + // TODO: needs cleanup super is called with a listener, but listener.onResponse is called below + // super.onFinish() fortunately ignores the listener super.onFinish(listener); long checkpoint = transformTask.currentCheckpoint.getAndIncrement(); + lastCheckpoint = nextCheckpoint; + nextCheckpoint = null; // Reset our failure count as we have finished and may start again with a new checkpoint failureCount.set(0); - if (checkpoint % ON_FINISH_AUDIT_FREQUENCY == 0) { + if (shouldAuditOnFinish(checkpoint)) { auditor.info(transformTask.getTransformId(), "Finished indexing for data frame transform checkpoint [" + checkpoint + "]."); } @@ -713,6 +760,26 @@ protected void onFinish(ActionListener listener) { } } + /** + * Indicates if an audit message should be written when onFinish is called for the given checkpoint + * We audit the first checkpoint, and then every 10 checkpoints until completedCheckpoint == 99 + * Then we audit every 100, until completedCheckpoint == 999 + * + * Then we always audit every 1_000 checkpoints + * + * @param completedCheckpoint The checkpoint that was just completed + * @return {@code true} if an audit message should be written + */ + protected boolean shouldAuditOnFinish(long completedCheckpoint) { + if (++logCount % logEvery != 0) { + return false; + } + int log10Checkpoint = (int) Math.floor(Math.log10(completedCheckpoint + 1)); + logEvery = log10Checkpoint >= 3 ? 1_000 : (int)Math.pow(10.0, log10Checkpoint); + logCount = 0; + return true; + } + @Override protected void onStop() { auditor.info(transformConfig.getId(), "Data frame transform has stopped."); @@ -754,7 +821,7 @@ public boolean sourceHasChanged() { SetOnce changed = new SetOnce<>(); transformsCheckpointService.getCheckpoint(transformConfig, new LatchedActionListener<>(ActionListener.wrap( cp -> { - long behind = DataFrameTransformCheckpoint.getBehind(inProgressOrLastCheckpoint, cp); + long behind = DataFrameTransformCheckpoint.getBehind(lastCheckpoint, cp); if (behind > 0) { logger.debug("Detected changes, dest is {} operations behind the source", behind); changed.set(true); @@ -763,7 +830,13 @@ public boolean sourceHasChanged() { } }, e -> { changed.set(false); - logger.error("failure in update check", e); + logger.warn( + "Failed to detect changes for data frame transform [" + transformId + "], skipping update till next check", + e); + + auditor.warning(transformId, + "Failed to detect changes for data frame transform, skipping update till next check. 
Exception: " + + e.getMessage()); }), latch)); try { @@ -772,7 +845,11 @@ public boolean sourceHasChanged() { return changed.get(); } } catch (InterruptedException e) { - logger.error("Failed to check for update", e); + logger.warn("Failed to detect changes for data frame transform [" + transformId + "], skipping update till next check", e); + + auditor.warning(transformId, + "Failed to detect changes for data frame transform, skipping update till next check. Exception: " + + e.getMessage()); } return false; @@ -787,10 +864,10 @@ synchronized void handleFailure(Exception e) { return; } - if (isIrrecoverableFailure(e) || failureCount.incrementAndGet() > MAX_CONTINUOUS_FAILURES) { + if (isIrrecoverableFailure(e) || failureCount.incrementAndGet() > transformTask.getNumFailureRetries()) { String failureMessage = isIrrecoverableFailure(e) ? "task encountered irrecoverable failure: " + e.getMessage() : - "task encountered more than " + MAX_CONTINUOUS_FAILURES + " failures; latest failure: " + e.getMessage(); + "task encountered more than " + transformTask.getNumFailureRetries() + " failures; latest failure: " + e.getMessage(); failIndexer(failureMessage); } } @@ -806,4 +883,11 @@ protected void failIndexer(String failureMessage) { }, e -> {})); } } + + // Considered a recoverable indexing failure + private static class BulkIndexingException extends ElasticsearchException { + BulkIndexingException(String msg, Object... args) { + super(msg, args); + } + } } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/SourceDestValidator.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/SourceDestValidator.java new file mode 100644 index 0000000000000..3f5ae039a9ac6 --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/SourceDestValidator.java @@ -0,0 +1,159 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.dataframe.transforms; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +/** + * This class contains more complex validations in regards to how {@link DataFrameTransformConfig#getSource()} and + * {@link DataFrameTransformConfig#getDestination()} relate to each other. + */ +public final class SourceDestValidator { + + interface SourceDestValidation { + boolean isDeferrable(); + void validate(DataFrameTransformConfig config, ClusterState clusterState, IndexNameExpressionResolver indexNameExpressionResolver); + } + + private static final List VALIDATIONS = Arrays.asList(new SourceMissingValidation(), + new DestinationInSourceValidation(), + new DestinationSingleIndexValidation()); + + /** + * Validates the DataFrameTransformConfiguration source and destination indices. 
+ + static class SourceMissingValidation implements SourceDestValidation { + + @Override + public boolean isDeferrable() { + return true; + } + + @Override + public void validate(DataFrameTransformConfig config, + ClusterState clusterState, + IndexNameExpressionResolver indexNameExpressionResolver) { + for(String src : config.getSource().getIndex()) { + String[] concreteNames = indexNameExpressionResolver.concreteIndexNames(clusterState, + IndicesOptions.lenientExpandOpen(), + src); + if (concreteNames.length == 0) { + throw new ElasticsearchStatusException( + DataFrameMessages.getMessage(DataFrameMessages.REST_PUT_DATA_FRAME_SOURCE_INDEX_MISSING, src), + RestStatus.BAD_REQUEST); + } + } + } + } + + static class DestinationInSourceValidation implements SourceDestValidation { + + @Override + public boolean isDeferrable() { + return true; + } + + @Override + public void validate(DataFrameTransformConfig config, + ClusterState clusterState, +
IndexNameExpressionResolver indexNameExpressionResolver) { + final String destIndex = config.getDestination().getIndex(); + final String[] concreteDest = + indexNameExpressionResolver.concreteIndexNames(clusterState, IndicesOptions.lenientExpandOpen(), destIndex); + + if (concreteDest.length > 1) { + throw new ElasticsearchStatusException( + DataFrameMessages.getMessage(DataFrameMessages.REST_PUT_DATA_FRAME_DEST_SINGLE_INDEX, destIndex), + RestStatus.BAD_REQUEST + ); + } + } + } +} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Pivot.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Pivot.java index 0c0104603f9c4..46b27938648b1 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Pivot.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Pivot.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.Client; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; @@ -51,6 +52,7 @@ public class Pivot { private static final Logger logger = LogManager.getLogger(Pivot.class); private final PivotConfig config; + private final boolean supportsIncrementalBucketUpdate; // objects for re-using private final CompositeAggregationBuilder cachedCompositeAggregation; @@ -58,19 +60,37 @@ public class Pivot { public Pivot(PivotConfig config) { this.config = config; this.cachedCompositeAggregation = createCompositeAggregation(config); + + boolean supportsIncrementalBucketUpdate = false; + for(Entry entry: config.getGroupConfig().getGroups().entrySet()) { + supportsIncrementalBucketUpdate |= entry.getValue().supportsIncrementalBucketUpdate(); + } + + this.supportsIncrementalBucketUpdate = supportsIncrementalBucketUpdate; } - public void validate(Client client, SourceConfig sourceConfig, final ActionListener listener) { - // step 1: check if used aggregations are supported + public void validateConfig() { for (AggregationBuilder agg : config.getAggregationConfig().getAggregatorFactories()) { if (Aggregations.isSupportedByDataframe(agg.getType()) == false) { - listener.onFailure(new RuntimeException("Unsupported aggregation type [" + agg.getType() + "]")); - return; + throw new RuntimeException("Unsupported aggregation type [" + agg.getType() + "]"); } } + } + + public void validateQuery(Client client, SourceConfig sourceConfig, final ActionListener listener) { + SearchRequest searchRequest = buildSearchRequest(sourceConfig, null, TEST_QUERY_PAGE_SIZE); - // step 2: run a query to validate that config is valid - runTestQuery(client, sourceConfig, listener); + client.execute(SearchAction.INSTANCE, searchRequest, ActionListener.wrap(response -> { + if (response == null) { + listener.onFailure(new RuntimeException("Unexpected null response from test query")); + return; + } + if (response.status() != RestStatus.OK) { + listener.onFailure(new RuntimeException("Unexpected status from response of test query: "+ response.status())); + return; + } + listener.onResponse(true); + }, e -> listener.onFailure(new RuntimeException("Failed to test query", e)))); } public void deduceMappings(Client client, SourceConfig sourceConfig, final ActionListener> 
listener) { @@ -104,8 +124,8 @@ public SearchRequest buildSearchRequest(SourceConfig sourceConfig, Map position, int pageSize) { @@ -135,6 +155,10 @@ public Map> initialIncrementalBucketUpdateMap() { return changedBuckets; } + public boolean supportsIncrementalBucketUpdate() { + return supportsIncrementalBucketUpdate; + } + public Stream> extractResults(CompositeAggregation agg, Map fieldTypeMap, DataFrameIndexerTransformStats dataFrameIndexerTransformStats) { @@ -151,24 +175,6 @@ public Stream> extractResults(CompositeAggregation agg, dataFrameIndexerTransformStats); } - private void runTestQuery(Client client, SourceConfig sourceConfig, final ActionListener listener) { - SearchRequest searchRequest = buildSearchRequest(sourceConfig, null, TEST_QUERY_PAGE_SIZE); - - client.execute(SearchAction.INSTANCE, searchRequest, ActionListener.wrap(response -> { - if (response == null) { - listener.onFailure(new RuntimeException("Unexpected null response from test query")); - return; - } - if (response.status() != RestStatus.OK) { - listener.onFailure(new RuntimeException("Unexpected status from response of test query: " + response.status())); - return; - } - listener.onResponse(true); - }, e->{ - listener.onFailure(new RuntimeException("Failed to test query", e)); - })); - } - public QueryBuilder filterBuckets(Map> changedBuckets) { if (changedBuckets == null || changedBuckets.isEmpty()) { @@ -234,4 +240,5 @@ private static CompositeAggregationBuilder createCompositeAggregationSources(Piv } return compositeAggregation; } + } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/SchemaUtil.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/SchemaUtil.java index 4ac77c38f7d5f..2aad83f5a2556 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/SchemaUtil.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/SchemaUtil.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsAction; import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest; import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse.FieldMappingMetaData; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.Client; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregationBuilder; @@ -174,6 +175,7 @@ private static void getSourceFieldMappings(Client client, String[] index, String GetFieldMappingsRequest fieldMappingRequest = new GetFieldMappingsRequest(); fieldMappingRequest.indices(index); fieldMappingRequest.fields(fields); + fieldMappingRequest.indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN); client.execute(GetFieldMappingsAction.INSTANCE, fieldMappingRequest, ActionListener.wrap( response -> listener.onResponse(extractFieldMappings(response.mappings())), diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/DataFrameNodesTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/DataFrameNodesTests.java index 276d3af39ef13..d13c33fe9aabf 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/DataFrameNodesTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/DataFrameNodesTests.java @@ -31,10 +31,10 @@ public void 
testDataframeNodes() { PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); tasksBuilder.addTask(dataFrameIdFoo, - DataFrameField.TASK_NAME, new DataFrameTransform(dataFrameIdFoo, Version.CURRENT), + DataFrameField.TASK_NAME, new DataFrameTransform(dataFrameIdFoo, Version.CURRENT, null), new PersistentTasksCustomMetaData.Assignment("node-1", "test assignment")); tasksBuilder.addTask(dataFrameIdBar, - DataFrameField.TASK_NAME, new DataFrameTransform(dataFrameIdBar, Version.CURRENT), + DataFrameField.TASK_NAME, new DataFrameTransform(dataFrameIdBar, Version.CURRENT, null), new PersistentTasksCustomMetaData.Assignment("node-2", "test assignment")); tasksBuilder.addTask("test-task1", "testTasks", new PersistentTaskParams() { @Override diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformActionTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformActionTests.java new file mode 100644 index 0000000000000..61fad63c83253 --- /dev/null +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformActionTests.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.dataframe.action; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransform; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; +import org.elasticsearch.xpack.core.indexing.IndexerState; + +import java.util.Arrays; +import java.util.Collections; + +import static org.elasticsearch.rest.RestStatus.CONFLICT; +import static org.hamcrest.Matchers.equalTo; + +public class TransportStopDataFrameTransformActionTests extends ESTestCase { + + private MetaData.Builder buildMetadata(PersistentTasksCustomMetaData ptasks) { + return MetaData.builder().putCustom(PersistentTasksCustomMetaData.TYPE, ptasks); + } + + public void testTaskStateValidationWithNoTasks() { + MetaData.Builder metaData = MetaData.builder(); + ClusterState.Builder csBuilder = ClusterState.builder(new ClusterName("_name")).metaData(metaData); + TransportStopDataFrameTransformAction.validateTaskState(csBuilder.build(), Collections.singletonList("non-failed-task"), false); + + PersistentTasksCustomMetaData.Builder pTasksBuilder = PersistentTasksCustomMetaData.builder(); + csBuilder = ClusterState.builder(new ClusterName("_name")).metaData(buildMetadata(pTasksBuilder.build())); + TransportStopDataFrameTransformAction.validateTaskState(csBuilder.build(), Collections.singletonList("non-failed-task"), false); + } + + public void testTaskStateValidationWithDataFrameTasks() { + // Test with the task state being null + PersistentTasksCustomMetaData.Builder pTasksBuilder = 
PersistentTasksCustomMetaData.builder() + .addTask("non-failed-task", + DataFrameTransform.NAME, + new DataFrameTransform("data-frame-task-1", Version.CURRENT, null), + new PersistentTasksCustomMetaData.Assignment("current-data-node-with-1-tasks", "")); + ClusterState.Builder csBuilder = ClusterState.builder(new ClusterName("_name")).metaData(buildMetadata(pTasksBuilder.build())); + + TransportStopDataFrameTransformAction.validateTaskState(csBuilder.build(), Collections.singletonList("non-failed-task"), false); + + // test again with a non failed task but this time it has internal state + pTasksBuilder.updateTaskState("non-failed-task", new DataFrameTransformState(DataFrameTransformTaskState.STOPPED, + IndexerState.STOPPED, + null, + 0L, + null, + null)); + csBuilder = ClusterState.builder(new ClusterName("_name")).metaData(buildMetadata(pTasksBuilder.build())); + + TransportStopDataFrameTransformAction.validateTaskState(csBuilder.build(), Collections.singletonList("non-failed-task"), false); + + pTasksBuilder.addTask("failed-task", + DataFrameTransform.NAME, + new DataFrameTransform("data-frame-task-1", Version.CURRENT, null), + new PersistentTasksCustomMetaData.Assignment("current-data-node-with-1-tasks", "")) + .updateTaskState("failed-task", new DataFrameTransformState(DataFrameTransformTaskState.FAILED, + IndexerState.STOPPED, + null, + 0L, + "task has failed", + null)); + csBuilder = ClusterState.builder(new ClusterName("_name")).metaData(buildMetadata(pTasksBuilder.build())); + + TransportStopDataFrameTransformAction.validateTaskState(csBuilder.build(), Arrays.asList("non-failed-task", "failed-task"), true); + + TransportStopDataFrameTransformAction.validateTaskState(csBuilder.build(), Collections.singletonList("non-failed-task"), false); + + ClusterState.Builder csBuilderFinal = ClusterState.builder(new ClusterName("_name")).metaData(buildMetadata(pTasksBuilder.build())); + ElasticsearchStatusException ex = expectThrows(ElasticsearchStatusException.class, + () -> TransportStopDataFrameTransformAction.validateTaskState(csBuilderFinal.build(), + Collections.singletonList("failed-task"), + false)); + + assertThat(ex.status(), equalTo(CONFLICT)); + assertThat(ex.getMessage(), + equalTo(DataFrameMessages.getMessage(DataFrameMessages.DATA_FRAME_CANNOT_STOP_FAILED_TRANSFORM, + "failed-task", + "task has failed"))); + } + +} diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointServiceTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointServiceTests.java index 9cc2769e7d149..ae5fbf7809f53 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointServiceTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointServiceTests.java @@ -52,7 +52,24 @@ public void testExtractIndexCheckpoints() { Map expectedCheckpoints = new HashMap<>(); Set indices = randomUserIndices(); - ShardStats[] shardStatsArray = createRandomShardStats(expectedCheckpoints, indices, false, false); + ShardStats[] shardStatsArray = createRandomShardStats(expectedCheckpoints, indices, false, false, false); + + Map checkpoints = DataFrameTransformsCheckpointService.extractIndexCheckPoints(shardStatsArray, indices); + + assertEquals(expectedCheckpoints.size(), checkpoints.size()); + assertEquals(expectedCheckpoints.keySet(), checkpoints.keySet()); 
+ + // low-level compare + for (Entry entry : expectedCheckpoints.entrySet()) { + assertTrue(Arrays.equals(entry.getValue(), checkpoints.get(entry.getKey()))); + } + } + + public void testExtractIndexCheckpointsMissingSeqNoStats() { + Map expectedCheckpoints = new HashMap<>(); + Set indices = randomUserIndices(); + + ShardStats[] shardStatsArray = createRandomShardStats(expectedCheckpoints, indices, false, false, true); Map checkpoints = DataFrameTransformsCheckpointService.extractIndexCheckPoints(shardStatsArray, indices); @@ -69,7 +86,7 @@ public void testExtractIndexCheckpointsLostPrimaries() { Map expectedCheckpoints = new HashMap<>(); Set indices = randomUserIndices(); - ShardStats[] shardStatsArray = createRandomShardStats(expectedCheckpoints, indices, true, false); + ShardStats[] shardStatsArray = createRandomShardStats(expectedCheckpoints, indices, true, false, false); Map checkpoints = DataFrameTransformsCheckpointService.extractIndexCheckPoints(shardStatsArray, indices); @@ -86,13 +103,14 @@ public void testExtractIndexCheckpointsInconsistentGlobalCheckpoints() { Map expectedCheckpoints = new HashMap<>(); Set indices = randomUserIndices(); - ShardStats[] shardStatsArray = createRandomShardStats(expectedCheckpoints, indices, randomBoolean(), true); + ShardStats[] shardStatsArray = createRandomShardStats(expectedCheckpoints, indices, randomBoolean(), true, false); // fail CheckpointException e = expectThrows(CheckpointException.class, () -> DataFrameTransformsCheckpointService.extractIndexCheckPoints(shardStatsArray, indices)); - assertThat(e.getMessage(), containsString("Global checkpoints mismatch")); } + assertThat(e.getMessage(), containsString("Global checkpoints mismatch")); + } /** * Create a random set of 3 index names @@ -120,10 +138,11 @@ private static Set randomUserIndices() { * @param userIndices set of indices that are visible * @param skipPrimaries whether some shards do not have a primary shard at random * @param inconsistentGlobalCheckpoints whether to introduce inconsistent global checkpoints + * @param missingSeqNoStats whether some indices miss SeqNoStats * @return array of ShardStats */ private static ShardStats[] createRandomShardStats(Map expectedCheckpoints, Set userIndices, - boolean skipPrimaries, boolean inconsistentGlobalCheckpoints) { + boolean skipPrimaries, boolean inconsistentGlobalCheckpoints, boolean missingSeqNoStats) { // always create the full list List indices = new ArrayList<>(); @@ -131,6 +150,8 @@ private static ShardStats[] createRandomShardStats(Map expectedC indices.add(new Index("index-2", UUIDs.randomBase64UUID(random()))); indices.add(new Index("index-3", UUIDs.randomBase64UUID(random()))); + String missingSeqNoStatsIndex = randomFrom(userIndices); + List shardStats = new ArrayList<>(); for (final Index index : indices) { int numShards = randomIntBetween(1, 5); @@ -160,8 +181,15 @@ private static ShardStats[] createRandomShardStats(Map expectedC long globalCheckpoint = randomBoolean() ? 
localCheckpoint : randomLongBetween(0L, 100000000L); long maxSeqNo = Math.max(localCheckpoint, globalCheckpoint); - final SeqNoStats validSeqNoStats = new SeqNoStats(maxSeqNo, localCheckpoint, globalCheckpoint); - checkpoints.add(globalCheckpoint); + SeqNoStats validSeqNoStats = null; + + // add broken seqNoStats if requested + if (missingSeqNoStats && index.getName().equals(missingSeqNoStatsIndex)) { + checkpoints.add(0L); + } else { + validSeqNoStats = new SeqNoStats(maxSeqNo, localCheckpoint, globalCheckpoint); + checkpoints.add(globalCheckpoint); + } for (int replica = 0; replica < numShardCopies; replica++) { ShardId shardId = new ShardId(index, shardIndex); diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/ClientDataFrameIndexerTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/ClientDataFrameIndexerTests.java new file mode 100644 index 0000000000000..00a7b5246ef41 --- /dev/null +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/ClientDataFrameIndexerTests.java @@ -0,0 +1,103 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.dataframe.transforms; + +import org.elasticsearch.client.Client; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransform; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpoint; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.xpack.core.indexing.IndexerState; +import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; +import org.elasticsearch.xpack.dataframe.checkpoint.DataFrameTransformsCheckpointService; +import org.elasticsearch.xpack.dataframe.notifications.DataFrameAuditor; +import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; + +import java.time.Instant; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class ClientDataFrameIndexerTests extends ESTestCase { + + public void testAuditOnFinishFrequency() { + ThreadPool threadPool = mock(ThreadPool.class); + when(threadPool.executor("generic")).thenReturn(mock(ExecutorService.class)); + DataFrameTransformTask parentTask = new DataFrameTransformTask(1, + "dataframe", + "ptask", + new TaskId("dataframe:1"), + mock(DataFrameTransform.class), + null, + mock(SchedulerEngine.class), + mock(DataFrameAuditor.class), + threadPool, + Collections.emptyMap()); + DataFrameTransformTask.ClientDataFrameIndexer indexer = new DataFrameTransformTask.ClientDataFrameIndexer(randomAlphaOfLength(10), + mock(DataFrameTransformsConfigManager.class), + mock(DataFrameTransformsCheckpointService.class), + new AtomicReference<>(IndexerState.STOPPED), + null, + mock(Client.class), + mock(DataFrameAuditor.class),
mock(DataFrameIndexerTransformStats.class), + mock(DataFrameTransformConfig.class), + Collections.emptyMap(), + null, + new DataFrameTransformCheckpoint("transform", + Instant.now().toEpochMilli(), + 0L, + Collections.emptyMap(), + Instant.now().toEpochMilli()), + new DataFrameTransformCheckpoint("transform", + Instant.now().toEpochMilli(), + 2L, + Collections.emptyMap(), + Instant.now().toEpochMilli()), + parentTask); + + List shouldAudit = IntStream.range(0, 100_000).boxed().map(indexer::shouldAuditOnFinish).collect(Collectors.toList()); + + // Audit every checkpoint for the first 10 + assertTrue(shouldAudit.get(0)); + assertTrue(shouldAudit.get(1)); + assertTrue(shouldAudit.get(9)); + + // Then audit every 10 while < 100 + assertFalse(shouldAudit.get(10)); + assertFalse(shouldAudit.get(11)); + assertTrue(shouldAudit.get(19)); + assertTrue(shouldAudit.get(29)); + assertFalse(shouldAudit.get(30)); + assertTrue(shouldAudit.get(99)); + + // Then audit every 100 while < 1000 + assertFalse(shouldAudit.get(100)); + assertFalse(shouldAudit.get(109)); + assertFalse(shouldAudit.get(110)); + assertTrue(shouldAudit.get(199)); + + // Then audit every 1000 for the rest of time + assertTrue(shouldAudit.get(1999)); + assertFalse(shouldAudit.get(2199)); + assertTrue(shouldAudit.get(2999)); + assertTrue(shouldAudit.get(9999)); + assertTrue(shouldAudit.get(10_999)); + assertFalse(shouldAudit.get(11_000)); + assertTrue(shouldAudit.get(11_999)); + } + +} diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexerTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexerTests.java index e3e9ff81eb653..154588443cb2f 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexerTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexerTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerPosition; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpoint; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; @@ -32,6 +33,8 @@ import org.elasticsearch.xpack.dataframe.transforms.pivot.Pivot; import org.junit.Before; +import java.io.PrintWriter; +import java.io.StringWriter; import java.util.Collections; import java.util.Map; import java.util.concurrent.CountDownLatch; @@ -68,13 +71,13 @@ class MockedDataFrameIndexer extends DataFrameIndexer { Map fieldMappings, DataFrameAuditor auditor, AtomicReference initialState, - Map initialPosition, + DataFrameIndexerPosition initialPosition, DataFrameIndexerTransformStats jobStats, Function searchFunction, Function bulkFunction, Consumer failureConsumer) { super(executor, auditor, transformConfig, fieldMappings, initialState, initialPosition, jobStats, - /* DataFrameTransformProgress */ null, DataFrameTransformCheckpoint.EMPTY); + /* DataFrameTransformProgress */ null, DataFrameTransformCheckpoint.EMPTY, DataFrameTransformCheckpoint.EMPTY); this.searchFunction = searchFunction; this.bulkFunction = bulkFunction; this.failureConsumer = failureConsumer; @@ -129,7 +132,7 @@ protected void doNextBulk(BulkRequest request,
ActionListener next } @Override - protected void doSaveState(IndexerState state, Map position, Runnable next) { + protected void doSaveState(IndexerState state, DataFrameIndexerPosition position, Runnable next) { assert state == IndexerState.STARTED || state == IndexerState.INDEXING || state == IndexerState.STOPPED; next.run(); } @@ -186,6 +189,7 @@ public void testPageSizeAdapt() throws InterruptedException { randomDestConfig(), null, null, + null, new PivotConfig(GroupConfigTests.randomGroupConfig(), AggregationConfigTests.randomAggregationConfig(), pageSize), randomBoolean() ? null : randomAlphaOfLengthBetween(1, 1000)); AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); @@ -197,7 +201,12 @@ public void testPageSizeAdapt() throws InterruptedException { Function bulkFunction = bulkRequest -> new BulkResponse(new BulkItemResponse[0], 100); - Consumer failureConsumer = e -> fail("expected circuit breaker exception to be handled"); + Consumer failureConsumer = e -> { + final StringWriter sw = new StringWriter(); + final PrintWriter pw = new PrintWriter(sw, true); + e.printStackTrace(pw); + fail("expected circuit breaker exception to be handled, got:" + e + " Trace: " + sw.getBuffer().toString()); + }; final ExecutorService executor = Executors.newFixedThreadPool(1); try { diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutorTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutorTests.java index 49cdabfdf0601..6e03669eeb40a 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutorTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutorTests.java @@ -21,6 +21,8 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; @@ -41,6 +43,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class DataFrameTransformPersistentTasksExecutorTests extends ESTestCase { @@ -51,15 +54,15 @@ public void testNodeVersionAssignment() { PersistentTasksCustomMetaData.Builder pTasksBuilder = PersistentTasksCustomMetaData.builder() .addTask("data-frame-task-1", DataFrameTransform.NAME, - new DataFrameTransform("data-frame-task-1", Version.CURRENT), + new DataFrameTransform("data-frame-task-1", Version.CURRENT, null), new PersistentTasksCustomMetaData.Assignment("current-data-node-with-1-tasks", "")) .addTask("data-frame-task-2", DataFrameTransform.NAME, - new DataFrameTransform("data-frame-task-2", Version.CURRENT), + new DataFrameTransform("data-frame-task-2", Version.CURRENT, null), new PersistentTasksCustomMetaData.Assignment("current-data-node-with-2-tasks", "")) .addTask("data-frame-task-3", DataFrameTransform.NAME, - new DataFrameTransform("data-frame-task-3", Version.CURRENT), + new DataFrameTransform("data-frame-task-3", Version.CURRENT, null), new PersistentTasksCustomMetaData.Assignment("current-data-node-with-2-tasks", 
"")); PersistentTasksCustomMetaData pTasks = pTasksBuilder.build(); @@ -98,16 +101,21 @@ public void testNodeVersionAssignment() { DataFrameTransformsConfigManager transformsConfigManager = new DataFrameTransformsConfigManager(client, xContentRegistry()); DataFrameTransformsCheckpointService dataFrameTransformsCheckpointService = new DataFrameTransformsCheckpointService(client, transformsConfigManager); - + ClusterSettings cSettings = new ClusterSettings(Settings.EMPTY, + Collections.singleton(DataFrameTransformTask.NUM_FAILURE_RETRIES_SETTING)); + ClusterService clusterService = mock(ClusterService.class); + when(clusterService.getClusterSettings()).thenReturn(cSettings); DataFrameTransformPersistentTasksExecutor executor = new DataFrameTransformPersistentTasksExecutor(client, transformsConfigManager, dataFrameTransformsCheckpointService, mock(SchedulerEngine.class), new DataFrameAuditor(client, ""), - mock(ThreadPool.class)); + mock(ThreadPool.class), + clusterService, + Settings.EMPTY); - assertThat(executor.getAssignment(new DataFrameTransform("new-task-id", Version.CURRENT), cs).getExecutorNode(), + assertThat(executor.getAssignment(new DataFrameTransform("new-task-id", Version.CURRENT, null), cs).getExecutorNode(), equalTo("current-data-node-with-1-tasks")); - assertThat(executor.getAssignment(new DataFrameTransform("new-old-task-id", Version.V_7_2_0), cs).getExecutorNode(), + assertThat(executor.getAssignment(new DataFrameTransform("new-old-task-id", Version.V_7_2_0, null), cs).getExecutorNode(), equalTo("past-data-node-1")); } diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/SourceDestValidatorTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/SourceDestValidatorTests.java new file mode 100644 index 0000000000000..c9f4a0bc06bc4 --- /dev/null +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/SourceDestValidatorTests.java @@ -0,0 +1,160 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.dataframe.transforms; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.AliasMetaData; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.xpack.core.dataframe.transforms.DestConfig; +import org.elasticsearch.xpack.core.dataframe.transforms.SourceConfig; +import org.elasticsearch.xpack.core.dataframe.transforms.pivot.PivotConfigTests; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_CREATION_DATE; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED; +import static org.hamcrest.Matchers.equalTo; + +public class SourceDestValidatorTests extends ESTestCase { + + private static final String SOURCE_1 = "source-1"; + private static final String SOURCE_2 = "source-2"; + private static final String ALIASED_DEST = "aliased-dest"; + + private static final ClusterState CLUSTER_STATE; + + static { + IndexMetaData source1 = IndexMetaData.builder(SOURCE_1).settings(Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0) + .put(SETTING_CREATION_DATE, System.currentTimeMillis())) + .putAlias(AliasMetaData.builder("source-1-alias").build()) + .build(); + IndexMetaData source2 = IndexMetaData.builder(SOURCE_2).settings(Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0) + .put(SETTING_CREATION_DATE, System.currentTimeMillis())) + .putAlias(AliasMetaData.builder("dest-alias").build()) + .build(); + IndexMetaData aliasedDest = IndexMetaData.builder(ALIASED_DEST).settings(Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0) + .put(SETTING_CREATION_DATE, System.currentTimeMillis())) + .putAlias(AliasMetaData.builder("dest-alias").build()) + .build(); + ClusterState.Builder state = ClusterState.builder(new ClusterName("test")); + state.metaData(MetaData.builder() + .put(IndexMetaData.builder(source1).build(), false) + .put(IndexMetaData.builder(source2).build(), false) + .put(IndexMetaData.builder(aliasedDest).build(), false)); + CLUSTER_STATE = state.build(); + } + + public void testCheck_GivenSimpleSourceIndexAndValidDestIndex() { + DataFrameTransformConfig config = createDataFrameTransform(new SourceConfig(SOURCE_1), new DestConfig("dest", null)); + SourceDestValidator.validate(config, CLUSTER_STATE, new IndexNameExpressionResolver(), false); + } + + public void testCheck_GivenMissingConcreteSourceIndex() { + DataFrameTransformConfig config = createDataFrameTransform(new SourceConfig("missing"), new DestConfig("dest", null)); + + ElasticsearchStatusException e = 
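// Strict validation rejects a missing concrete source index with 400 BAD_REQUEST;
// the trailing validate(..., true) call in each of these tests shows the same
// config passing when the final boolean argument is true (lenient/deferred
// validation).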
expectThrows(ElasticsearchStatusException.class, + () -> SourceDestValidator.validate(config, CLUSTER_STATE, new IndexNameExpressionResolver(), false)); + assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); + assertThat(e.getMessage(), equalTo("Source index [missing] does not exist")); + SourceDestValidator.validate(config, CLUSTER_STATE, new IndexNameExpressionResolver(), true); + } + + public void testCheck_GivenMissingWildcardSourceIndex() { + DataFrameTransformConfig config = createDataFrameTransform(new SourceConfig("missing*"), new DestConfig("dest", null)); + + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, + () -> SourceDestValidator.validate(config, CLUSTER_STATE, new IndexNameExpressionResolver(), false)); + assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); + assertThat(e.getMessage(), equalTo("Source index [missing*] does not exist")); + SourceDestValidator.validate(config, CLUSTER_STATE, new IndexNameExpressionResolver(), true); + } + + public void testCheck_GivenDestIndexSameAsSourceIndex() { + DataFrameTransformConfig config = createDataFrameTransform(new SourceConfig(SOURCE_1), new DestConfig("source-1", null)); + + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, + () -> SourceDestValidator.validate(config, CLUSTER_STATE, new IndexNameExpressionResolver(), false)); + assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); + assertThat(e.getMessage(), equalTo("Destination index [source-1] is included in source expression [source-1]")); + SourceDestValidator.validate(config, CLUSTER_STATE, new IndexNameExpressionResolver(), true); + } + + public void testCheck_GivenDestIndexMatchesSourceIndex() { + DataFrameTransformConfig config = createDataFrameTransform(new SourceConfig("source-*"), new DestConfig(SOURCE_2, null)); + + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, + () -> SourceDestValidator.validate(config, CLUSTER_STATE, new IndexNameExpressionResolver(), false)); + assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); + assertThat(e.getMessage(), equalTo("Destination index [source-2] is included in source expression [source-*]")); + SourceDestValidator.validate(config, CLUSTER_STATE, new IndexNameExpressionResolver(), true); + } + + public void testCheck_GivenDestIndexMatchesOneOfSourceIndices() { + DataFrameTransformConfig config = createDataFrameTransform(new SourceConfig("source-1", "source-*"), + new DestConfig(SOURCE_2, null)); + + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, + () -> SourceDestValidator.validate(config, CLUSTER_STATE, new IndexNameExpressionResolver(), false)); + assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); + assertThat(e.getMessage(), equalTo("Destination index [source-2] is included in source expression [source-*]")); + SourceDestValidator.validate(config, CLUSTER_STATE, new IndexNameExpressionResolver(), true); + } + + public void testCheck_GivenDestIndexIsAliasThatMatchesMultipleIndices() { + DataFrameTransformConfig config = createDataFrameTransform(new SourceConfig(SOURCE_1), new DestConfig("dest-alias", null)); + + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, + () -> SourceDestValidator.validate(config, CLUSTER_STATE, new IndexNameExpressionResolver(), false)); + assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); + assertThat(e.getMessage(), + equalTo("Destination index [dest-alias] should refer to a single index")); + + e = 
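// Unlike the missing-index cases above, an alias that resolves to more than one
// index is rejected by both expectThrows calls in this test: even the lenient
// mode treats an ambiguous destination as a hard error.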
expectThrows(ElasticsearchStatusException.class, + () -> SourceDestValidator.validate(config, CLUSTER_STATE, new IndexNameExpressionResolver(), true)); + assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); + assertThat(e.getMessage(), + equalTo("Destination index [dest-alias] should refer to a single index")); + } + + public void testCheck_GivenDestIndexIsAliasThatIsIncludedInSource() { + DataFrameTransformConfig config = createDataFrameTransform(new SourceConfig(SOURCE_1), new DestConfig("source-1-alias", null)); + + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, + () -> SourceDestValidator.validate(config, CLUSTER_STATE, new IndexNameExpressionResolver(), false)); + assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); + assertThat(e.getMessage(), + equalTo("Destination index [source-1] is included in source expression [source-1]")); + + SourceDestValidator.validate(config, CLUSTER_STATE, new IndexNameExpressionResolver(), true); + } + + private static DataFrameTransformConfig createDataFrameTransform(SourceConfig sourceConfig, DestConfig destConfig) { + return new DataFrameTransformConfig("test", + sourceConfig, + destConfig, + TimeValue.timeValueSeconds(60), + null, + null, + PivotConfigTests.randomPivotConfig(), + null); + } +} diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java index a9e3f118b6371..db3a29d394ca4 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java @@ -47,7 +47,9 @@ import java.util.stream.Stream; import static java.util.Collections.emptyList; +import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; public class PivotTests extends ESTestCase { @@ -140,10 +142,10 @@ public void testValidateAllSupportedAggregations() throws Exception { public void testValidateAllUnsupportedAggregations() throws Exception { for (String agg : unsupportedAggregations) { AggregationConfig aggregationConfig = getAggregationConfig(agg); - SourceConfig source = new SourceConfig(new String[]{"existing_source"}, QueryConfig.matchAll()); Pivot pivot = new Pivot(getValidPivotConfig(aggregationConfig)); - assertInvalidTransform(client, source, pivot); + RuntimeException ex = expectThrows(RuntimeException.class, pivot::validateConfig); + assertThat("expected aggregations to be unsupported, but they were", ex, is(notNullValue())); } } @@ -248,7 +250,7 @@ private static void assertInvalidTransform(Client client, SourceConfig source, P private static void validate(Client client, SourceConfig source, Pivot pivot, boolean expectValid) throws Exception { CountDownLatch latch = new CountDownLatch(1); final AtomicReference exceptionHolder = new AtomicReference<>(); - pivot.validate(client, source, ActionListener.wrap(validity -> { + pivot.validateQuery(client, source, ActionListener.wrap(validity -> { assertEquals(expectValid, validity); latch.countDown(); }, e -> { diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportDeprecationInfoAction.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportDeprecationInfoAction.java index 6b55006983d0c..bc16c98a2773c 
100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportDeprecationInfoAction.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportDeprecationInfoAction.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.license.LicenseUtils; @@ -34,6 +35,7 @@ import org.elasticsearch.xpack.core.ml.action.GetDatafeedsAction; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import java.io.IOException; import java.util.Collections; import java.util.List; import java.util.stream.Collectors; @@ -72,8 +74,8 @@ protected String executor() { } @Override - protected DeprecationInfoAction.Response newResponse() { - return new DeprecationInfoAction.Response(); + protected DeprecationInfoAction.Response read(StreamInput in) throws IOException { + return new DeprecationInfoAction.Response(in); } @Override diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportNodeDeprecationCheckAction.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportNodeDeprecationCheckAction.java index eeb3c7e8c8d75..b05f3987e6375 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportNodeDeprecationCheckAction.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportNodeDeprecationCheckAction.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.support.nodes.TransportNodesAction; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.tasks.Task; @@ -22,6 +23,7 @@ import org.elasticsearch.xpack.core.deprecation.NodesDeprecationCheckRequest; import org.elasticsearch.xpack.core.deprecation.NodesDeprecationCheckResponse; +import java.io.IOException; import java.util.List; public class TransportNodeDeprecationCheckAction extends TransportNodesAction getEngineFactory(IndexSettings indexSettings) { + if (indexSettings.getValue(FrozenEngine.INDEX_FROZEN)) { + return Optional.of(FrozenEngine::new); + } else { + return Optional.empty(); + } + } + + @Override + public List<Setting<?>> getSettings() { + return Arrays.asList(FrozenEngine.INDEX_FROZEN); + } + + @Override + public void onIndexModule(IndexModule indexModule) { + if (FrozenEngine.INDEX_FROZEN.get(indexModule.getSettings())) { + indexModule.addSearchOperationListener(new FrozenEngine.ReacquireEngineSearcherListener()); + } + super.onIndexModule(indexModule); + } + + @Override + public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() { + List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> actions = new ArrayList<>(); + actions.add(new ActionHandler<>(XPackUsageFeatureAction.FROZEN_INDICES, FrozenIndicesUsageTransportAction.class)); + actions.add(new ActionHandler<>(XPackInfoFeatureAction.FROZEN_INDICES, FrozenIndicesInfoTransportAction.class)); + actions.add(new ActionHandler<>(FreezeIndexAction.INSTANCE, TransportFreezeIndexAction.class)); + return actions; + } + + @Override + public List<RestHandler> getRestHandlers(Settings settings, RestController restController,
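// The frozen-indices plugin keeps its surface small: the engine swap is driven
// entirely by the index.frozen setting (getEngineFactory above), and the only
// REST endpoint registered below is the freeze/unfreeze handler.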
ClusterSettings clusterSettings, + IndexScopedSettings indexScopedSettings, SettingsFilter settingsFilter, + IndexNameExpressionResolver indexNameExpressionResolver, + Supplier nodesInCluster) { + return Collections.singletonList(new RestFreezeIndexAction(settings, restController)); + } +} diff --git a/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/FrozenIndicesInfoTransportAction.java b/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/FrozenIndicesInfoTransportAction.java new file mode 100644 index 0000000000000..fd359626dc67c --- /dev/null +++ b/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/FrozenIndicesInfoTransportAction.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.frozen; + +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.action.XPackInfoFeatureAction; +import org.elasticsearch.xpack.core.action.XPackInfoFeatureTransportAction; + +public class FrozenIndicesInfoTransportAction extends XPackInfoFeatureTransportAction { + + @Inject + public FrozenIndicesInfoTransportAction(TransportService transportService, ActionFilters actionFilters) { + super(XPackInfoFeatureAction.FROZEN_INDICES.name(), transportService, actionFilters); + } + + @Override + public String name() { + return XPackField.FROZEN_INDICES; + } + + @Override + public boolean available() { + return true; + } + + @Override + public boolean enabled() { + return true; + } +} diff --git a/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/FrozenIndicesUsageTransportAction.java b/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/FrozenIndicesUsageTransportAction.java new file mode 100644 index 0000000000000..10c6d1348d808 --- /dev/null +++ b/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/FrozenIndicesUsageTransportAction.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.frozen; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.index.engine.FrozenEngine; +import org.elasticsearch.protocol.xpack.XPackUsageRequest; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureTransportAction; +import org.elasticsearch.xpack.core.frozen.FrozenIndicesFeatureSetUsage; + +public class FrozenIndicesUsageTransportAction extends XPackUsageFeatureTransportAction { + + @Inject + public FrozenIndicesUsageTransportAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + super(XPackUsageFeatureAction.FROZEN_INDICES.name(), transportService, clusterService, threadPool, actionFilters, + indexNameExpressionResolver); + } + + @Override + protected void masterOperation(Task task, XPackUsageRequest request, ClusterState state, + ActionListener listener) { + int numFrozenIndices = 0; + for (IndexMetaData indexMetaData : state.metaData()) { + if (FrozenEngine.INDEX_FROZEN.get(indexMetaData.getSettings())) { + numFrozenIndices++; + } + } + listener.onResponse(new XPackUsageFeatureResponse(new FrozenIndicesFeatureSetUsage(true, true, numFrozenIndices))); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportFreezeIndexAction.java b/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/action/TransportFreezeIndexAction.java similarity index 61% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportFreezeIndexAction.java rename to x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/action/TransportFreezeIndexAction.java index 228f5c9f2be30..a787a7945fc83 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportFreezeIndexAction.java +++ b/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/action/TransportFreezeIndexAction.java @@ -3,24 +3,17 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.action; +package org.elasticsearch.xpack.frozen.action; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.admin.indices.close.CloseIndexClusterStateUpdateRequest; import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction; import org.elasticsearch.action.admin.indices.open.OpenIndexClusterStateUpdateRequest; -import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.DestructiveOperations; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; @@ -37,25 +30,23 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.FrozenEngine; +import org.elasticsearch.protocol.xpack.frozen.FreezeRequest; +import org.elasticsearch.protocol.xpack.frozen.FreezeResponse; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.frozen.action.FreezeIndexAction; import java.io.IOException; import java.util.ArrayList; import java.util.List; -import static org.elasticsearch.action.ValidateActions.addValidationError; - public final class TransportFreezeIndexAction extends - TransportMasterNodeAction { + TransportMasterNodeAction { private final DestructiveOperations destructiveOperations; private final MetaDataIndexStateService indexStateService; @@ -68,8 +59,8 @@ public TransportFreezeIndexAction(MetaDataIndexStateService indexStateService, T IndexNameExpressionResolver indexNameExpressionResolver, DestructiveOperations destructiveOperations, TransportCloseIndexAction transportCloseIndexAction) { - super(FreezeIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, - FreezeRequest::new); + super(FreezeIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, FreezeRequest::new, + indexNameExpressionResolver); this.destructiveOperations = destructiveOperations; this.indexStateService = indexStateService; this.transportCloseIndexAction = transportCloseIndexAction; @@ -85,11 +76,6 @@ protected void doExecute(Task task, FreezeRequest request, ActionListener listener) throws Exception { + protected void masterOperation(Task task, FreezeRequest request, ClusterState state, + ActionListener listener) throws Exception { final Index[] concreteIndices 
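// resolveIndices applies the request's IndicesOptions; when nothing matches, the
// branch below returns an acknowledged no-op response instead of failing, which
// keeps wildcard freeze requests safe when they expand to zero indices.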
= resolveIndices(request, state); if (concreteIndices.length == 0) { listener.onResponse(new FreezeResponse(true, true)); @@ -216,137 +202,4 @@ protected ClusterBlockException checkBlock(FreezeRequest request, ClusterState s return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndexNames(state, request)); } - - public static class FreezeResponse extends OpenIndexResponse { - FreezeResponse(StreamInput in) throws IOException { - super(in); - } - - public FreezeResponse(boolean acknowledged, boolean shardsAcknowledged) { - super(acknowledged, shardsAcknowledged); - } - } - - public static class FreezeIndexAction extends ActionType { - - public static final FreezeIndexAction INSTANCE = new FreezeIndexAction(); - public static final String NAME = "indices:admin/freeze"; - - private FreezeIndexAction() { - super(NAME); - } - - @Override - public Writeable.Reader getResponseReader() { - return FreezeResponse::new; - } - } - - public static class FreezeRequest extends AcknowledgedRequest - implements IndicesRequest.Replaceable { - private String[] indices; - private boolean freeze = true; - private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen(); - private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT; - - public FreezeRequest(String... indices) { - this.indices = indices; - } - - @Override - public ActionRequestValidationException validate() { - ActionRequestValidationException validationException = null; - if (CollectionUtils.isEmpty(indices)) { - validationException = addValidationError("index is missing", validationException); - } - return validationException; - } - - public FreezeRequest setFreeze(boolean freeze) { - this.freeze = freeze; - return this; - } - - public boolean freeze() { - return freeze; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - indicesOptions = IndicesOptions.readIndicesOptions(in); - indices = in.readStringArray(); - freeze = in.readBoolean(); - waitForActiveShards = ActiveShardCount.readFrom(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - indicesOptions.writeIndicesOptions(out); - out.writeStringArray(indices); - out.writeBoolean(freeze); - waitForActiveShards.writeTo(out); - } - - /** - * @return the indices to be frozen or unfrozen - */ - @Override - public String[] indices() { - return indices; - } - - /** - * Specifies what type of requested indices to ignore and how to deal with wildcard expressions. - * For example indices that don't exist. - * - * @return the current behaviour when it comes to index names and wildcard indices expressions - */ - @Override - public IndicesOptions indicesOptions() { - return indicesOptions; - } - - /** - * Specifies what type of requested indices to ignore and how to deal with wildcard expressions. - * For example indices that don't exist. - * - * @param indicesOptions the desired behaviour regarding indices to ignore and wildcard indices expressions - * @return the request itself - */ - public FreezeRequest indicesOptions(IndicesOptions indicesOptions) { - this.indicesOptions = indicesOptions; - return this; - } - - @Override - public IndicesRequest indices(String... indices) { - this.indices = indices; - return this; - } - - public ActiveShardCount waitForActiveShards() { - return waitForActiveShards; - } - - /** - * Sets the number of shard copies that should be active for indices opening to return. 
- * Defaults to {@link ActiveShardCount#DEFAULT}, which will wait for one shard copy - * (the primary) to become active. Set this value to {@link ActiveShardCount#ALL} to - * wait for all shards (primary and all replicas) to be active before returning. - * Otherwise, use {@link ActiveShardCount#from(int)} to set this value to any - * non-negative integer, up to the number of copies per shard (number of replicas + 1), - * to wait for the desired amount of shard copies to become active before returning. - * Indices opening will only wait up until the timeout value for the number of shard copies - * to be active before returning. Check {@link OpenIndexResponse#isShardsAcknowledged()} to - * determine if the requisite shard copies were all started before returning or timing out. - * - * @param waitForActiveShards number of active shard copies to wait on - */ - public FreezeRequest waitForActiveShards(ActiveShardCount waitForActiveShards) { - this.waitForActiveShards = waitForActiveShards; - return this; - } - } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestFreezeIndexAction.java b/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/rest/action/RestFreezeIndexAction.java similarity index 83% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestFreezeIndexAction.java rename to x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/rest/action/RestFreezeIndexAction.java index 580acbae0dc87..81943dc2ec680 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestFreezeIndexAction.java +++ b/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/rest/action/RestFreezeIndexAction.java @@ -3,19 +3,19 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.rest.action; +package org.elasticsearch.xpack.frozen.rest.action; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.protocol.xpack.frozen.FreezeRequest; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.core.action.TransportFreezeIndexAction; -import org.elasticsearch.xpack.core.action.TransportFreezeIndexAction.FreezeIndexAction; +import org.elasticsearch.xpack.core.frozen.action.FreezeIndexAction; public final class RestFreezeIndexAction extends BaseRestHandler { public RestFreezeIndexAction(Settings settings, RestController controller) { @@ -27,8 +27,7 @@ public RestFreezeIndexAction(Settings settings, RestController controller) { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { boolean freeze = request.path().endsWith("/_freeze"); - TransportFreezeIndexAction.FreezeRequest freezeRequest = - new TransportFreezeIndexAction.FreezeRequest(Strings.splitStringByCommaToArray(request.param("index"))); + FreezeRequest freezeRequest = new FreezeRequest(Strings.splitStringByCommaToArray(request.param("index"))); freezeRequest.timeout(request.paramAsTime("timeout", freezeRequest.timeout())); freezeRequest.masterNodeTimeout(request.paramAsTime("master_timeout", freezeRequest.masterNodeTimeout())); freezeRequest.indicesOptions(IndicesOptions.fromRequest(request, freezeRequest.indicesOptions())); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenEngineTests.java b/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/FrozenEngineTests.java similarity index 100% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenEngineTests.java rename to x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/FrozenEngineTests.java diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexRecoveryTests.java b/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/FrozenIndexRecoveryTests.java similarity index 100% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexRecoveryTests.java rename to x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/FrozenIndexRecoveryTests.java diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java b/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java similarity index 92% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java rename to x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java index f1009542d1325..6832433106be7 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java +++ b/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java @@ -33,15 +33,15 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.plugins.Plugin; +import 
org.elasticsearch.protocol.xpack.frozen.FreezeRequest; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.ShardSearchLocalRequest; import org.elasticsearch.test.ESSingleNodeTestCase; -import org.elasticsearch.xpack.core.XPackPlugin; -import org.elasticsearch.xpack.core.action.TransportFreezeIndexAction; -import org.elasticsearch.xpack.core.action.TransportFreezeIndexAction.FreezeIndexAction; +import org.elasticsearch.xpack.core.frozen.action.FreezeIndexAction; +import org.elasticsearch.xpack.frozen.FrozenIndices; import org.hamcrest.Matchers; import java.io.IOException; @@ -62,7 +62,7 @@ public class FrozenIndexTests extends ESSingleNodeTestCase { @Override protected Collection> getPlugins() { - return pluginList(XPackPlugin.class); + return pluginList(FrozenIndices.class); } public void testCloseFreezeAndOpen() { @@ -70,7 +70,7 @@ public void testCloseFreezeAndOpen() { client().prepareIndex("index", "_doc", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); client().prepareIndex("index", "_doc", "2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); client().prepareIndex("index", "_doc", "3").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("index")).actionGet()); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest("index")).actionGet()); expectThrows(ClusterBlockException.class, () -> client().prepareIndex("index", "_doc", "4").setSource("field", "value") .setRefreshPolicy(IMMEDIATE).get()); IndicesService indexServices = getInstanceFromNode(IndicesService.class); @@ -115,30 +115,29 @@ public void testSearchAndGetAPIsAreThrottled() throws InterruptedException, IOEx for (int i = 0; i < 10; i++) { client().prepareIndex("index", "_doc", "" + i).setSource("field", "foo bar baz").get(); } - assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("index")).actionGet()); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest("index")).actionGet()); int numRequests = randomIntBetween(20, 50); CountDownLatch latch = new CountDownLatch(numRequests); - ActionListener listener = ActionListener.wrap(latch::countDown); int numRefreshes = 0; for (int i = 0; i < numRequests; i++) { numRefreshes++; switch (randomIntBetween(0, 3)) { case 0: - client().prepareGet("index", "_doc", "" + randomIntBetween(0, 9)).execute(listener); + client().prepareGet("index", "_doc", "" + randomIntBetween(0, 9)).execute(ActionListener.wrap(latch::countDown)); break; case 1: client().prepareSearch("index").setIndicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED) .setSearchType(SearchType.QUERY_THEN_FETCH) - .execute(listener); + .execute(ActionListener.wrap(latch::countDown)); // in total 4 refreshes 1x query & 1x fetch per shard (we have 2) numRefreshes += 3; break; case 2: - client().prepareTermVectors("index", "" + randomIntBetween(0, 9)).execute(listener); + client().prepareTermVectors("index", "" + randomIntBetween(0, 9)).execute(ActionListener.wrap(latch::countDown)); break; case 3: client().prepareExplain("index", "_doc", "" + randomIntBetween(0, 9)).setQuery(new MatchAllQueryBuilder()) - .execute(listener); + .execute(ActionListener.wrap(latch::countDown)); break; default: assert 
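// default is unreachable: randomIntBetween(0, 3) only yields the cases above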
false; @@ -158,7 +157,7 @@ public void testFreezeAndUnfreeze() { // sometimes close it assertAcked(client().admin().indices().prepareClose("index").get()); } - assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("index")).actionGet()); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest("index")).actionGet()); { IndicesService indexServices = getInstanceFromNode(IndicesService.class); Index index = resolveIndex("index"); @@ -168,7 +167,7 @@ public void testFreezeAndUnfreeze() { assertEquals(0, shard.refreshStats().getTotal()); } assertAcked(client().execute(FreezeIndexAction.INSTANCE, - new TransportFreezeIndexAction.FreezeRequest("index").setFreeze(false)).actionGet()); + new FreezeRequest("index").setFreeze(false)).actionGet()); { IndicesService indexServices = getInstanceFromNode(IndicesService.class); Index index = resolveIndex("index"); @@ -191,9 +190,9 @@ private void assertIndexFrozen(String idx) { public void testDoubleFreeze() { createIndex("test-idx", Settings.builder().put("index.number_of_shards", 2).build()); - assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("test-idx")).actionGet()); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest("test-idx")).actionGet()); ResourceNotFoundException exception = expectThrows(ResourceNotFoundException.class, - () -> client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("test-idx") + () -> client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest("test-idx") .indicesOptions(new IndicesOptions(EnumSet.noneOf(IndicesOptions.Option.class), EnumSet.of(IndicesOptions.WildcardStates.OPEN)))).actionGet()); assertEquals("no index found to freeze", exception.getMessage()); @@ -204,9 +203,9 @@ public void testUnfreezeClosedIndices() { client().prepareIndex("idx", "_doc", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); createIndex("idx-closed", Settings.builder().put("index.number_of_shards", 1).build()); client().prepareIndex("idx-closed", "_doc", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("idx")).actionGet()); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest("idx")).actionGet()); assertAcked(client().admin().indices().prepareClose("idx-closed").get()); - assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("idx*").setFreeze(false) + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest("idx*").setFreeze(false) .indicesOptions(IndicesOptions.strictExpand())).actionGet()); ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get(); assertEquals(IndexMetaData.State.CLOSE, stateResponse.getState().getMetaData().index("idx-closed").getState()); @@ -219,7 +218,7 @@ public void testFreezePattern() { client().prepareIndex("test-idx", "_doc", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); createIndex("test-idx-1", Settings.builder().put("index.number_of_shards", 1).build()); client().prepareIndex("test-idx-1", "_doc", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("test-idx")).actionGet()); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new 
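// FreezeRequest is now the standalone org.elasticsearch.protocol.xpack.frozen
// class (see the import change above) rather than a type nested inside
// TransportFreezeIndexAction, so the tests construct it directly.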
FreezeRequest("test-idx")).actionGet()); assertIndexFrozen("test-idx"); IndicesStatsResponse index = client().admin().indices().prepareStats("test-idx").clear().setRefresh(true).get(); @@ -228,7 +227,7 @@ public void testFreezePattern() { index = client().admin().indices().prepareStats("test-idx").clear().setRefresh(true).get(); assertEquals(1, index.getTotal().refresh.getTotal()); - assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("test*")).actionGet()); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest("test*")).actionGet()); assertIndexFrozen("test-idx"); assertIndexFrozen("test-idx-1"); index = client().admin().indices().prepareStats("test-idx").clear().setRefresh(true).get(); @@ -265,7 +264,7 @@ public void testCanMatch() throws IOException { new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, -1, null, null))); } - assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("index")).actionGet()); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest("index")).actionGet()); { IndicesService indexServices = getInstanceFromNode(IndicesService.class); @@ -296,7 +295,7 @@ public void testCanMatch() throws IOException { public void testWriteToFrozenIndex() { createIndex("idx", Settings.builder().put("index.number_of_shards", 1).build()); client().prepareIndex("idx", "_doc", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("idx")).actionGet()); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest("idx")).actionGet()); assertIndexFrozen("idx"); expectThrows(ClusterBlockException.class, () -> client().prepareIndex("idx", "_doc", "2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get()); @@ -306,7 +305,7 @@ public void testIgnoreUnavailable() { createIndex("idx", Settings.builder().put("index.number_of_shards", 1).build()); createIndex("idx-close", Settings.builder().put("index.number_of_shards", 1).build()); assertAcked(client().admin().indices().prepareClose("idx-close")); - assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("idx*", "not_available") + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest("idx*", "not_available") .indicesOptions(IndicesOptions.fromParameters(null, "true", null, null, IndicesOptions.strictExpandOpen()))).actionGet()); assertIndexFrozen("idx"); assertEquals(IndexMetaData.State.CLOSE, @@ -315,17 +314,17 @@ public void testIgnoreUnavailable() { public void testUnfreezeClosedIndex() { createIndex("idx", Settings.builder().put("index.number_of_shards", 1).build()); - assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("idx")).actionGet()); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest("idx")).actionGet()); assertAcked(client().admin().indices().prepareClose("idx")); assertEquals(IndexMetaData.State.CLOSE, client().admin().cluster().prepareState().get().getState().metaData().index("idx").getState()); expectThrows(IndexNotFoundException.class, - () -> client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("id*").setFreeze(false) + () -> client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest("id*").setFreeze(false) .indicesOptions(new 
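// With wildcard expansion restricted to OPEN indices, the id* pattern cannot see
// the closed index, hence the IndexNotFoundException asserted here; the follow-up
// request below names idx explicitly and succeeds.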
IndicesOptions(EnumSet.noneOf(IndicesOptions.Option.class), EnumSet.of(IndicesOptions.WildcardStates.OPEN)))).actionGet()); // we don't resolve to closed indices assertAcked(client().execute(FreezeIndexAction.INSTANCE, - new TransportFreezeIndexAction.FreezeRequest("idx").setFreeze(false)).actionGet()); + new FreezeRequest("idx").setFreeze(false)).actionGet()); assertEquals(IndexMetaData.State.OPEN, client().admin().cluster().prepareState().get().getState().metaData().index("idx").getState()); } @@ -338,7 +337,7 @@ public void testFreezeIndexIncreasesIndexSettingsVersion() { final long settingsVersion = client().admin().cluster().prepareState().get() .getState().metaData().index(index).getSettingsVersion(); - assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest(index)).actionGet()); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest(index)).actionGet()); assertIndexFrozen(index); assertThat(client().admin().cluster().prepareState().get().getState().metaData().index(index).getSettingsVersion(), greaterThan(settingsVersion)); @@ -366,7 +365,7 @@ public void testFreezeEmptyIndexWithTranslogOps() throws Exception { assertThat(indexService.getShard(0).getLastKnownGlobalCheckpoint(), greaterThanOrEqualTo(nbNoOps - 1L)); }); - assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest(indexName)).actionGet()); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest(indexName)).actionGet()); assertIndexFrozen(indexName); } @@ -382,7 +381,7 @@ public void testRecoveryState() { assertThat(indexResponse.status(), is(RestStatus.CREATED)); } - assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest(indexName)).actionGet()); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest(indexName)).actionGet()); assertIndexFrozen(indexName); final IndexMetaData indexMetaData = client().admin().cluster().prepareState().get().getState().metaData().index(indexName); @@ -427,7 +426,7 @@ public void testTranslogStats() { assertThat(stats.getIndex(indexName).getPrimaries().getTranslog().estimatedNumberOfOperations(), equalTo(nbDocs)); assertThat(stats.getIndex(indexName).getPrimaries().getTranslog().getUncommittedOperations(), equalTo(uncommittedOps)); - assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest(indexName)).actionGet()); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest(indexName)).actionGet()); assertIndexFrozen(indexName); IndicesOptions indicesOptions = IndicesOptions.STRICT_EXPAND_OPEN; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/RewriteCachingDirectoryReaderTests.java b/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/RewriteCachingDirectoryReaderTests.java similarity index 100% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/RewriteCachingDirectoryReaderTests.java rename to x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/RewriteCachingDirectoryReaderTests.java diff --git a/x-pack/plugin/graph/qa/with-security/build.gradle b/x-pack/plugin/graph/qa/with-security/build.gradle index 1a979d83355c9..578f910146748 100644 --- a/x-pack/plugin/graph/qa/with-security/build.gradle +++ b/x-pack/plugin/graph/qa/with-security/build.gradle @@ -16,7 +16,7 @@ task copyGraphRestTests(type: Copy) { integTest.dependsOn 
copyGraphRestTests testClusters.integTest { - distribution = "DEFAULT" + testDistribution = 'DEFAULT' setting 'xpack.security.enabled', 'true' setting 'xpack.license.self_generated.type', 'trial' diff --git a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java index e6c7dd59efd9e..c0c8822ac232f 100644 --- a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java +++ b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java @@ -57,7 +57,6 @@ import java.util.Map.Entry; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Supplier; /** * Performs a series of elasticsearch queries and aggregations to explore @@ -85,7 +84,7 @@ protected boolean lessThan(Vertex a, Vertex b) { @Inject public TransportGraphExploreAction(ThreadPool threadPool, NodeClient client, TransportService transportService, ActionFilters actionFilters, XPackLicenseState licenseState) { - super(GraphExploreAction.NAME, transportService, actionFilters, (Supplier)GraphExploreRequest::new); + super(GraphExploreAction.NAME, transportService, actionFilters, GraphExploreRequest::new); this.threadPool = threadPool; this.client = client; this.licenseState = licenseState; diff --git a/x-pack/plugin/ilm/build.gradle b/x-pack/plugin/ilm/build.gradle index 4e0c44ef8e4ab..17b66741a7a9f 100644 --- a/x-pack/plugin/ilm/build.gradle +++ b/x-pack/plugin/ilm/build.gradle @@ -5,7 +5,7 @@ apply plugin: 'elasticsearch.esplugin' esplugin { name 'x-pack-ilm' description 'Elasticsearch Expanded Pack Plugin - Index Lifecycle Management' - classname 'org.elasticsearch.xpack.indexlifecycle.IndexLifecycle' + classname 'org.elasticsearch.xpack.ilm.IndexLifecycle' extendedPlugins = ['x-pack-core'] hasNativeController false requiresKeystore true diff --git a/x-pack/plugin/ilm/qa/multi-cluster/build.gradle b/x-pack/plugin/ilm/qa/multi-cluster/build.gradle index 86b82208ca4c5..8ba0a758f7371 100644 --- a/x-pack/plugin/ilm/qa/multi-cluster/build.gradle +++ b/x-pack/plugin/ilm/qa/multi-cluster/build.gradle @@ -21,7 +21,7 @@ task 'leader-cluster'(type: RestIntegTestTask) { } testClusters.'leader-cluster' { - distribution = "DEFAULT" + testDistribution = 'DEFAULT' setting 'path.repo', repoDir.absolutePath setting 'xpack.ilm.enabled', 'true' setting 'xpack.ccr.enabled', 'true' @@ -48,7 +48,7 @@ task 'follow-cluster'(type: RestIntegTestTask) { } testClusters.'follow-cluster' { - distribution = "DEFAULT" + testDistribution = 'DEFAULT' setting 'path.repo', repoDir.absolutePath setting 'xpack.ilm.enabled', 'true' setting 'xpack.ccr.enabled', 'true' diff --git a/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/indexlifecycle/CCRIndexLifecycleIT.java b/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ilm/CCRIndexLifecycleIT.java similarity index 99% rename from x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/indexlifecycle/CCRIndexLifecycleIT.java rename to x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ilm/CCRIndexLifecycleIT.java index 14e70446ab120..2051250874758 100644 --- a/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/indexlifecycle/CCRIndexLifecycleIT.java +++ 
b/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ilm/CCRIndexLifecycleIT.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.indexlifecycle; +package org.elasticsearch.xpack.ilm; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; diff --git a/x-pack/plugin/ilm/qa/multi-node/build.gradle b/x-pack/plugin/ilm/qa/multi-node/build.gradle index 1a72ee5cf6f17..77464b031aa15 100644 --- a/x-pack/plugin/ilm/qa/multi-node/build.gradle +++ b/x-pack/plugin/ilm/qa/multi-node/build.gradle @@ -16,7 +16,7 @@ integTest.runner { } testClusters.integTest { - distribution = 'DEFAULT' + testDistribution = 'DEFAULT' numberOfNodes = 4 setting 'path.repo', repoDir.absolutePath diff --git a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/ChangePolicyforIndexIT.java b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/ilm/ChangePolicyforIndexIT.java similarity index 99% rename from x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/ChangePolicyforIndexIT.java rename to x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/ilm/ChangePolicyforIndexIT.java index 1b93fed4fe334..09477be0f9d9b 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/ChangePolicyforIndexIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/ilm/ChangePolicyforIndexIT.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.indexlifecycle; +package org.elasticsearch.xpack.ilm; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; diff --git a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java similarity index 99% rename from x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java rename to x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java index 757869640ebad..21ee948299d1d 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.indexlifecycle; +package org.elasticsearch.xpack.ilm; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; @@ -21,7 +21,6 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.engine.FrozenEngine; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xpack.core.indexlifecycle.AllocateAction; import org.elasticsearch.xpack.core.indexlifecycle.DeleteAction; @@ -542,7 +541,7 @@ public void testFreezeAction() throws Exception { assertThat(getStepKeyForIndex(index), equalTo(TerminalPolicyStep.KEY)); assertThat(settings.get(IndexMetaData.INDEX_BLOCKS_WRITE_SETTING.getKey()), equalTo("true")); assertThat(settings.get(IndexSettings.INDEX_SEARCH_THROTTLED.getKey()), equalTo("true")); - assertThat(settings.get(FrozenEngine.INDEX_FROZEN.getKey()), equalTo("true")); + assertThat(settings.get("index.frozen"), equalTo("true")); }); } @@ -580,7 +579,7 @@ public void testFreezeDuringSnapshot() throws Exception { assertThat(getStepKeyForIndex(index), equalTo(TerminalPolicyStep.KEY)); assertThat(settings.get(IndexMetaData.INDEX_BLOCKS_WRITE_SETTING.getKey()), equalTo("true")); assertThat(settings.get(IndexSettings.INDEX_SEARCH_THROTTLED.getKey()), equalTo("true")); - assertThat(settings.get(FrozenEngine.INDEX_FROZEN.getKey()), equalTo("true")); + assertThat(settings.get("index.frozen"), equalTo("true")); }, 2, TimeUnit.MINUTES); // assert that snapshot is still in progress and clean up assertThat(getSnapshotState("snapshot"), equalTo("SUCCESS")); diff --git a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleIT.java b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleIT.java new file mode 100644 index 0000000000000..5ec97881eed4a --- /dev/null +++ b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleIT.java @@ -0,0 +1,331 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.slm; + +import org.apache.http.util.EntityUtils; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecyclePolicy; + +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.startsWith; + +public class SnapshotLifecycleIT extends ESRestTestCase { + + public void testMissingRepo() throws Exception { + SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy("test-policy", "snap", + "*/1 * * * * ?", "missing-repo", Collections.emptyMap()); + + Request putLifecycle = new Request("PUT", "/_slm/policy/test-policy"); + XContentBuilder lifecycleBuilder = JsonXContent.contentBuilder(); + policy.toXContent(lifecycleBuilder, ToXContent.EMPTY_PARAMS); + putLifecycle.setJsonEntity(Strings.toString(lifecycleBuilder)); + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(putLifecycle)); + Response resp = e.getResponse(); + assertThat(resp.getStatusLine().getStatusCode(), equalTo(400)); + String jsonError = EntityUtils.toString(resp.getEntity()); + assertThat(jsonError, containsString("\"type\":\"illegal_argument_exception\"")); + assertThat(jsonError, containsString("\"reason\":\"no such repository [missing-repo]\"")); + } + + @SuppressWarnings("unchecked") + public void testFullPolicySnapshot() throws Exception { + final String indexName = "test"; + final String policyName = "test-policy"; + final String repoId = "my-repo"; + int docCount = randomIntBetween(10, 50); + List indexReqs = new ArrayList<>(); + for (int i = 0; i < docCount; i++) { + index(client(), indexName, "" + i, "foo", "bar"); + } + + // Create a snapshot repo + inializeRepo(repoId); + + createSnapshotPolicy(policyName, "snap", "*/1 * * * * ?", repoId, indexName, true); + + // Check that the snapshot was actually taken + assertBusy(() -> { + Response response = client().performRequest(new Request("GET", "/_snapshot/" + repoId + "/_all")); + Map snapshotResponseMap; + try (InputStream is = response.getEntity().getContent()) { + snapshotResponseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); + } + assertThat(snapshotResponseMap.size(), greaterThan(0)); + List> snapResponse = ((List>) snapshotResponseMap.get("responses")).stream() + .findFirst() + .map(m -> (List>) 
m.get("snapshots")) + .orElseThrow(() -> new AssertionError("failed to find snapshot response in " + snapshotResponseMap)); + assertThat(snapResponse.size(), greaterThan(0)); + assertThat(snapResponse.get(0).get("snapshot").toString(), startsWith("snap-")); + assertThat(snapResponse.get(0).get("indices"), equalTo(Collections.singletonList(indexName))); + Map metadata = (Map) snapResponse.get(0).get("metadata"); + assertNotNull(metadata); + assertThat(metadata.get("policy"), equalTo(policyName)); + assertHistoryIsPresent(policyName, true, repoId); + + // Check that the last success date was written to the cluster state + Request getReq = new Request("GET", "/_slm/policy/" + policyName); + Response policyMetadata = client().performRequest(getReq); + Map policyResponseMap; + try (InputStream is = policyMetadata.getEntity().getContent()) { + policyResponseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); + } + Map policyMetadataMap = (Map) policyResponseMap.get(policyName); + Map lastSuccessObject = (Map) policyMetadataMap.get("last_success"); + assertNotNull(lastSuccessObject); + Long lastSuccess = (Long) lastSuccessObject.get("time"); + Long modifiedDate = (Long) policyMetadataMap.get("modified_date_millis"); + assertNotNull(lastSuccess); + assertNotNull(modifiedDate); + assertThat(lastSuccess, greaterThan(modifiedDate)); + + String lastSnapshotName = (String) lastSuccessObject.get("snapshot_name"); + assertThat(lastSnapshotName, startsWith("snap-")); + + assertHistoryIsPresent(policyName, true, repoId); + }); + + Request delReq = new Request("DELETE", "/_slm/policy/" + policyName); + assertOK(client().performRequest(delReq)); + + // It's possible there could have been a snapshot in progress when the + // policy is deleted, so wait for it to be finished + assertBusy(() -> { + assertThat(wipeSnapshots().size(), equalTo(0)); + }); + } + + @SuppressWarnings("unchecked") + public void testPolicyFailure() throws Exception { + final String policyName = "test-policy"; + final String repoName = "test-repo"; + final String indexPattern = "index-doesnt-exist"; + inializeRepo(repoName); + + // Create a policy with ignore_unvailable: false and an index that doesn't exist + createSnapshotPolicy(policyName, "snap", "*/1 * * * * ?", repoName, indexPattern, false); + + assertBusy(() -> { + // Check that the failure is written to the cluster state + Request getReq = new Request("GET", "/_slm/policy/" + policyName); + Response policyMetadata = client().performRequest(getReq); + try (InputStream is = policyMetadata.getEntity().getContent()) { + Map responseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); + Map policyMetadataMap = (Map) responseMap.get(policyName); + Map lastFailureObject = (Map) policyMetadataMap.get("last_failure"); + assertNotNull(lastFailureObject); + + Long lastFailure = (Long) lastFailureObject.get("time"); + Long modifiedDate = (Long) policyMetadataMap.get("modified_date_millis"); + assertNotNull(lastFailure); + assertNotNull(modifiedDate); + assertThat(lastFailure, greaterThan(modifiedDate)); + + String lastFailureInfo = (String) lastFailureObject.get("details"); + assertNotNull(lastFailureInfo); + assertThat(lastFailureInfo, containsString("no such index [index-doesnt-exist]")); + + String snapshotName = (String) lastFailureObject.get("snapshot_name"); + assertNotNull(snapshotName); + assertThat(snapshotName, startsWith("snap-")); + } + assertHistoryIsPresent(policyName, false, repoName); + }); + + Request delReq = new 
Request("DELETE", "/_slm/policy/" + policyName); + assertOK(client().performRequest(delReq)); + } + + public void testPolicyManualExecution() throws Exception { + final String indexName = "test"; + final String policyName = "test-policy"; + final String repoId = "my-repo"; + int docCount = randomIntBetween(10, 50); + List indexReqs = new ArrayList<>(); + for (int i = 0; i < docCount; i++) { + index(client(), indexName, "" + i, "foo", "bar"); + } + + // Create a snapshot repo + inializeRepo(repoId); + + createSnapshotPolicy(policyName, "snap", "1 2 3 4 5 ?", repoId, indexName, true); + + ResponseException badResp = expectThrows(ResponseException.class, + () -> client().performRequest(new Request("PUT", "/_slm/policy/" + policyName + "-bad/_execute"))); + assertThat(EntityUtils.toString(badResp.getResponse().getEntity()), + containsString("no such snapshot lifecycle policy [" + policyName + "-bad]")); + + Response goodResp = client().performRequest(new Request("PUT", "/_slm/policy/" + policyName + "/_execute")); + + try (XContentParser parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, EntityUtils.toByteArray(goodResp.getEntity()))) { + final String snapshotName = parser.mapStrings().get("snapshot_name"); + + // Check that the executed snapshot is created + assertBusy(() -> { + try { + Response response = client().performRequest(new Request("GET", "/_snapshot/" + repoId + "/" + snapshotName)); + Map snapshotResponseMap; + try (InputStream is = response.getEntity().getContent()) { + snapshotResponseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); + } + assertThat(snapshotResponseMap.size(), greaterThan(0)); + final Map metadata = extractMetadata(snapshotResponseMap, snapshotName); + assertNotNull(metadata); + assertThat(metadata.get("policy"), equalTo(policyName)); + assertHistoryIsPresent(policyName, true, repoId); + } catch (ResponseException e) { + fail("expected snapshot to exist but it does not: " + EntityUtils.toString(e.getResponse().getEntity())); + } + }); + } + + Request delReq = new Request("DELETE", "/_slm/policy/" + policyName); + assertOK(client().performRequest(delReq)); + + // It's possible there could have been a snapshot in progress when the + // policy is deleted, so wait for it to be finished + assertBusy(() -> { + assertThat(wipeSnapshots().size(), equalTo(0)); + }); + } + + @SuppressWarnings("unchecked") + private static Map extractMetadata(Map snapshotResponseMap, String snapshotPrefix) { + List> snapResponse = ((List>) snapshotResponseMap.get("responses")).stream() + .findFirst() + .map(m -> (List>) m.get("snapshots")) + .orElseThrow(() -> new AssertionError("failed to find snapshot response in " + snapshotResponseMap)); + return snapResponse.stream() + .filter(snapshot -> ((String) snapshot.get("snapshot")).startsWith(snapshotPrefix)) + .map(snapshot -> (Map) snapshot.get("metadata")) + .findFirst() + .orElse(null); + } + + // This method should be called inside an assertBusy, it has no retry logic of its own + private void assertHistoryIsPresent(String policyName, boolean success, String repository) throws IOException { + final Request historySearchRequest = new Request("GET", ".slm-history*/_search"); + historySearchRequest.setJsonEntity("{\n" + + " \"query\": {\n" + + " \"bool\": {\n" + + " \"must\": [\n" + + " {\n" + + " \"term\": {\n" + + " \"policy\": \"" + policyName + "\"\n" + + " }\n" + + " },\n" + + " {\n" + + " \"term\": {\n" + + " \"success\": " + success 
+ "\n" + + " }\n" + + " },\n" + + " {\n" + + " \"term\": {\n" + + " \"repository\": \"" + repository + "\"\n" + + " }\n" + + " },\n" + + " {\n" + + " \"term\": {\n" + + " \"operation\": \"CREATE\"\n" + + " }\n" + + " }\n" + + " ]\n" + + " }\n" + + " }\n" + + "}"); + Response historyResponse; + try { + historyResponse = client().performRequest(historySearchRequest); + Map historyResponseMap; + try (InputStream is = historyResponse.getEntity().getContent()) { + historyResponseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); + } + assertThat((int)((Map) ((Map) historyResponseMap.get("hits")).get("total")).get("value"), + greaterThanOrEqualTo(1)); + } catch (ResponseException e) { + // Throw AssertionError instead of an exception if the search fails so that assertBusy works as expected + logger.error(e); + fail("failed to perform search:" + e.getMessage()); + } + } + + private void createSnapshotPolicy(String policyName, String snapshotNamePattern, String schedule, String repoId, + String indexPattern, boolean ignoreUnavailable) throws IOException { + Map snapConfig = new HashMap<>(); + snapConfig.put("indices", Collections.singletonList(indexPattern)); + snapConfig.put("ignore_unavailable", ignoreUnavailable); + if (randomBoolean()) { + Map metadata = new HashMap<>(); + int fieldCount = randomIntBetween(2,5); + for (int i = 0; i < fieldCount; i++) { + metadata.put(randomValueOtherThanMany(key -> "policy".equals(key) || metadata.containsKey(key), + () -> randomAlphaOfLength(5)), randomAlphaOfLength(4)); + } + } + SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy(policyName, snapshotNamePattern, schedule, repoId, snapConfig); + + Request putLifecycle = new Request("PUT", "/_slm/policy/" + policyName); + XContentBuilder lifecycleBuilder = JsonXContent.contentBuilder(); + policy.toXContent(lifecycleBuilder, ToXContent.EMPTY_PARAMS); + putLifecycle.setJsonEntity(Strings.toString(lifecycleBuilder)); + assertOK(client().performRequest(putLifecycle)); + } + + private void inializeRepo(String repoName) throws IOException { + Request request = new Request("PUT", "/_snapshot/" + repoName); + request.setJsonEntity(Strings + .toString(JsonXContent.contentBuilder() + .startObject() + .field("type", "fs") + .startObject("settings") + .field("compress", randomBoolean()) + .field("location", System.getProperty("tests.path.repo")) + .field("max_snapshot_bytes_per_sec", "256b") + .endObject() + .endObject())); + assertOK(client().performRequest(request)); + } + + private static void index(RestClient client, String index, String id, Object... 
fields) throws IOException { + XContentBuilder document = jsonBuilder().startObject(); + for (int i = 0; i < fields.length; i += 2) { + document.field((String) fields[i], fields[i + 1]); + } + document.endObject(); + final Request request = new Request("POST", "/" + index + "/_doc/" + id); + request.setJsonEntity(Strings.toString(document)); + assertOK(client.performRequest(request)); + } +} diff --git a/x-pack/plugin/ilm/qa/rest/build.gradle b/x-pack/plugin/ilm/qa/rest/build.gradle index 8fbe1fbdff07e..8aaa6c70e3a0a 100644 --- a/x-pack/plugin/ilm/qa/rest/build.gradle +++ b/x-pack/plugin/ilm/qa/rest/build.gradle @@ -23,7 +23,7 @@ task restTest(type: RestIntegTestTask) { } testClusters.restTest { - distribution = 'DEFAULT' + testDistribution = 'DEFAULT' setting 'xpack.ilm.enabled', 'true' setting 'xpack.ml.enabled', 'false' setting 'xpack.monitoring.enabled', 'false' diff --git a/x-pack/plugin/ilm/qa/rest/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRestIT.java b/x-pack/plugin/ilm/qa/rest/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRestIT.java similarity index 97% rename from x-pack/plugin/ilm/qa/rest/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRestIT.java rename to x-pack/plugin/ilm/qa/rest/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRestIT.java index f784e2b940bfe..bc753540c9e91 100644 --- a/x-pack/plugin/ilm/qa/rest/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRestIT.java +++ b/x-pack/plugin/ilm/qa/rest/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRestIT.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.indexlifecycle; +package org.elasticsearch.xpack.ilm; import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; diff --git a/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/40_explain_lifecycle.yml b/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/40_explain_lifecycle.yml index dff5ed955ff10..baa051103d5fa 100644 --- a/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/40_explain_lifecycle.yml +++ b/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/40_explain_lifecycle.yml @@ -104,6 +104,7 @@ teardown: - match: { indices.my_index.action: "complete" } - match: { indices.my_index.step: "complete" } - is_true: indices.my_index.phase_time_millis + - is_true: indices.my_index.age - is_false: indices.my_index.failed_step - is_false: indices.my_index.step_info - is_false: indices.my_index.phase_execution @@ -126,6 +127,7 @@ teardown: - match: { indices.my_index.action: "complete" } - match: { indices.my_index.step: "complete" } - is_true: indices.my_index.phase_time_millis + - is_true: indices.my_index.age - is_false: indices.my_index.failed_step - is_false: indices.my_index.step_info - is_false: indices.my_index.phase_execution @@ -137,6 +139,7 @@ teardown: - match: { indices.my_index2.action: "complete" } - match: { indices.my_index2.step: "complete" } - is_true: indices.my_index2.phase_time_millis + - is_true: indices.my_index2.age - is_false: indices.my_index2.failed_step - is_false: indices.my_index2.step_info - is_false: indices.my_index2.phase_execution @@ -159,6 +162,7 @@ teardown: - match: { indices.my_index.action: "complete" } - match: { indices.my_index.step: "complete" } - is_true: indices.my_index.phase_time_millis + - is_true: indices.my_index.age - 
is_false: indices.my_index.failed_step - is_false: indices.my_index.step_info - is_false: indices.my_index.phase_execution @@ -170,6 +174,7 @@ teardown: - match: { indices.my_index2.action: "complete" } - match: { indices.my_index2.step: "complete" } - is_true: indices.my_index2.phase_time_millis + - is_true: indices.my_index2.age - is_false: indices.my_index2.failed_step - is_false: indices.my_index2.step_info - is_false: indices.my_index2.phase_execution @@ -181,6 +186,7 @@ teardown: - match: { indices.another_index.action: "complete" } - match: { indices.another_index.step: "complete" } - is_true: indices.another_index.phase_time_millis + - is_true: indices.another_index.age - is_false: indices.another_index.failed_step - is_false: indices.another_index.step_info - is_false: indices.another_index.phase_execution @@ -191,6 +197,7 @@ teardown: - is_false: indices.unmanaged_index.phase - is_false: indices.unmanaged_index.action - is_false: indices.unmanaged_index.step + - is_false: indices.unmanaged_index.age - is_false: indices.another_index.failed_step - is_false: indices.another_index.step_info @@ -208,6 +215,7 @@ teardown: - is_false: indices.unmanaged_index.action - is_false: indices.unmanaged_index.step - is_false: indices.unmanaged_index.phase_execution + - is_false: indices.unmanaged_index.age - is_false: indices.another_index.failed_step - is_false: indices.another_index.step_info - is_false: indices.my_index diff --git a/x-pack/plugin/ilm/qa/with-security/build.gradle b/x-pack/plugin/ilm/qa/with-security/build.gradle index 84685c3da7c88..125b35fb5dcb5 100644 --- a/x-pack/plugin/ilm/qa/with-security/build.gradle +++ b/x-pack/plugin/ilm/qa/with-security/build.gradle @@ -17,7 +17,7 @@ integTest { } testClusters.integTest { - distribution = "DEFAULT" + testDistribution = 'DEFAULT' setting 'xpack.ilm.enabled', 'true' setting 'xpack.security.enabled', 'true' setting 'xpack.watcher.enabled', 'false' diff --git a/x-pack/plugin/ilm/qa/with-security/src/test/java/org/elasticsearch/xpack/security/PermissionsIT.java b/x-pack/plugin/ilm/qa/with-security/src/test/java/org/elasticsearch/xpack/security/PermissionsIT.java index d8c50c470664e..7535f8aaa128b 100644 --- a/x-pack/plugin/ilm/qa/with-security/src/test/java/org/elasticsearch/xpack/security/PermissionsIT.java +++ b/x-pack/plugin/ilm/qa/with-security/src/test/java/org/elasticsearch/xpack/security/PermissionsIT.java @@ -3,16 +3,28 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License.
*/ + package org.elasticsearch.xpack.security; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; +import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; import org.elasticsearch.client.Node; import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClientBuilder; +import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.client.snapshotlifecycle.DeleteSnapshotLifecyclePolicyRequest; +import org.elasticsearch.client.snapshotlifecycle.ExecuteSnapshotLifecyclePolicyRequest; +import org.elasticsearch.client.snapshotlifecycle.ExecuteSnapshotLifecyclePolicyResponse; +import org.elasticsearch.client.snapshotlifecycle.GetSnapshotLifecyclePolicyRequest; +import org.elasticsearch.client.snapshotlifecycle.PutSnapshotLifecyclePolicyRequest; +import org.elasticsearch.client.snapshotlifecycle.SnapshotLifecyclePolicy; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; @@ -23,8 +35,9 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.test.junit.annotations.TestIssueLogging; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xpack.core.indexlifecycle.DeleteAction; import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction; @@ -36,6 +49,8 @@ import java.io.IOException; import java.io.InputStream; +import java.util.Collections; +import java.util.HashMap; import java.util.Map; import static java.util.Collections.singletonMap; @@ -45,6 +60,7 @@ import static org.hamcrest.Matchers.is; public class PermissionsIT extends ESRestTestCase { + private static final String jsonDoc = "{ \"name\" : \"elasticsearch\", \"body\": \"foo bar\" }"; private String deletePolicy = "deletePolicy"; @@ -127,6 +143,95 @@ public void testCanManageIndexWithNoPermissions() throws Exception { }); } + public void testSLMWithPermissions() throws Exception { + createIndexAsAdmin("index", Settings.builder().put("index.number_of_replicas", 0).build(), ""); + + // Set up two roles and users, one for reading SLM, another for managing SLM + Request roleRequest = new Request("PUT", "/_security/role/slm-read"); + roleRequest.setJsonEntity("{ \"cluster\": [\"read_slm\"] }"); + assertOK(adminClient().performRequest(roleRequest)); + roleRequest = new Request("PUT", "/_security/role/slm-manage"); + roleRequest.setJsonEntity("{ \"cluster\": [\"manage_slm\", \"create_snapshot\"]," + + "\"indices\": [{ \"names\": [\".slm-history*\"],\"privileges\": [\"all\"] }] }"); + assertOK(adminClient().performRequest(roleRequest)); + + createUser("slm_admin", "slm-pass", "slm-manage"); + createUser("slm_user", "slm-user-pass", "slm-read"); + + final HighLevelClient hlAdminClient = new HighLevelClient(adminClient()); + + // Build two high level clients, each using a different user + final 
RestClientBuilder adminBuilder = RestClient.builder(adminClient().getNodes().toArray(new Node[0])); + final String adminToken = basicAuthHeaderValue("slm_admin", new SecureString("slm-pass".toCharArray())); + configureClient(adminBuilder, Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", adminToken) + .build()); + adminBuilder.setStrictDeprecationMode(true); + final RestHighLevelClient adminHLRC = new RestHighLevelClient(adminBuilder); + + final RestClientBuilder userBuilder = RestClient.builder(adminClient().getNodes().toArray(new Node[0])); + final String userToken = basicAuthHeaderValue("slm_user", new SecureString("slm-user-pass".toCharArray())); + configureClient(userBuilder, Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", userToken) + .build()); + userBuilder.setStrictDeprecationMode(true); + final RestHighLevelClient readHlrc = new RestHighLevelClient(userBuilder); + + PutRepositoryRequest repoRequest = new PutRepositoryRequest(); + + Settings.Builder settingsBuilder = Settings.builder().put("location", "."); + repoRequest.settings(settingsBuilder); + repoRequest.name("my_repository"); + repoRequest.type(FsRepository.TYPE); + org.elasticsearch.action.support.master.AcknowledgedResponse response = + hlAdminClient.snapshot().createRepository(repoRequest, RequestOptions.DEFAULT); + assertTrue(response.isAcknowledged()); + + Map config = new HashMap<>(); + config.put("indices", Collections.singletonList("index")); + SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy( + "policy_id", "name", "1 2 3 * * ?", "my_repository", config); + PutSnapshotLifecyclePolicyRequest request = new PutSnapshotLifecyclePolicyRequest(policy); + + expectThrows(ElasticsearchStatusException.class, + () -> readHlrc.indexLifecycle().putSnapshotLifecyclePolicy(request, RequestOptions.DEFAULT)); + + adminHLRC.indexLifecycle().putSnapshotLifecyclePolicy(request, RequestOptions.DEFAULT); + + GetSnapshotLifecyclePolicyRequest getRequest = new GetSnapshotLifecyclePolicyRequest("policy_id"); + readHlrc.indexLifecycle().getSnapshotLifecyclePolicy(getRequest, RequestOptions.DEFAULT); + adminHLRC.indexLifecycle().getSnapshotLifecyclePolicy(getRequest, RequestOptions.DEFAULT); + + ExecuteSnapshotLifecyclePolicyRequest executeRequest = new ExecuteSnapshotLifecyclePolicyRequest("policy_id"); + expectThrows(ElasticsearchStatusException.class, () -> + readHlrc.indexLifecycle().executeSnapshotLifecyclePolicy(executeRequest, RequestOptions.DEFAULT)); + + ExecuteSnapshotLifecyclePolicyResponse executeResp = + adminHLRC.indexLifecycle().executeSnapshotLifecyclePolicy(executeRequest, RequestOptions.DEFAULT); + + DeleteSnapshotLifecyclePolicyRequest deleteRequest = new DeleteSnapshotLifecyclePolicyRequest("policy_id"); + expectThrows(ElasticsearchStatusException.class, () -> + readHlrc.indexLifecycle().deleteSnapshotLifecyclePolicy(deleteRequest, RequestOptions.DEFAULT)); + + adminHLRC.indexLifecycle().deleteSnapshotLifecyclePolicy(deleteRequest, RequestOptions.DEFAULT); + + // Delete snapshot to clean up and make sure it's not on-going. 
+ // This is inside an assertBusy because the snapshot may not + // yet exist (in which case it throws an error) + assertBusy(() -> { + try { + DeleteSnapshotRequest delReq = new DeleteSnapshotRequest("my_repository", executeResp.getSnapshotName()); + hlAdminClient.snapshot().delete(delReq, RequestOptions.DEFAULT); + } catch (ElasticsearchStatusException e) { + fail("got exception: " + e); + } + }); + + hlAdminClient.close(); + readHlrc.close(); + adminHLRC.close(); + } + public void testCanViewExplainOnUnmanagedIndex() throws Exception { createIndexAsAdmin("view-only-ilm", indexSettingsWithPolicy, ""); Request request = new Request("GET", "/view-only-ilm/_ilm/explain"); @@ -138,7 +243,7 @@ public void testCanViewExplainOnUnmanagedIndex() throws Exception { * Tests when the user is limited by alias of an index is able to write to index * which was rolled over by an ILM policy. */ - @TestLogging("org.elasticsearch:DEBUG") + @TestIssueLogging(value = "org.elasticsearch:DEBUG", issueUrl = "https://github.com/elastic/elasticsearch/issues/41440") public void testWhenUserLimitedByOnlyAliasOfIndexCanWriteToIndexWhichWasRolledoverByILMPolicy() throws IOException, InterruptedException { /* @@ -263,4 +368,11 @@ private void refresh(String index) throws IOException { Request request = new Request("POST", "/" + index + "/_refresh"); assertOK(adminClient().performRequest(request)); } + + private static class HighLevelClient extends RestHighLevelClient { + private HighLevelClient(RestClient restClient) { + super(restClient, (client) -> {}, Collections.emptyList()); + } + } + } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/ExecuteStepsUpdateTask.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/ExecuteStepsUpdateTask.java similarity index 99% rename from x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/ExecuteStepsUpdateTask.java rename to x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/ExecuteStepsUpdateTask.java index 131330bcb9c99..ac798a3565e12 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/ExecuteStepsUpdateTask.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/ExecuteStepsUpdateTask.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.indexlifecycle; +package org.elasticsearch.xpack.ilm; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycle.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java similarity index 65% rename from x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycle.java rename to x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java index 2a03a1c834fac..e27e99a8acabf 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycle.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java @@ -3,9 +3,10 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.indexlifecycle; +package org.elasticsearch.xpack.ilm; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.client.Client; @@ -22,6 +23,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.plugins.ActionPlugin; @@ -58,27 +60,45 @@ import org.elasticsearch.xpack.core.indexlifecycle.action.RetryAction; import org.elasticsearch.xpack.core.indexlifecycle.action.StartILMAction; import org.elasticsearch.xpack.core.indexlifecycle.action.StopILMAction; -import org.elasticsearch.xpack.indexlifecycle.action.RestDeleteLifecycleAction; -import org.elasticsearch.xpack.indexlifecycle.action.RestExplainLifecycleAction; -import org.elasticsearch.xpack.indexlifecycle.action.RestGetLifecycleAction; -import org.elasticsearch.xpack.indexlifecycle.action.RestGetStatusAction; -import org.elasticsearch.xpack.indexlifecycle.action.RestMoveToStepAction; -import org.elasticsearch.xpack.indexlifecycle.action.RestPutLifecycleAction; -import org.elasticsearch.xpack.indexlifecycle.action.RestRemoveIndexLifecyclePolicyAction; -import org.elasticsearch.xpack.indexlifecycle.action.RestRetryAction; -import org.elasticsearch.xpack.indexlifecycle.action.RestStartILMAction; -import org.elasticsearch.xpack.indexlifecycle.action.RestStopAction; -import org.elasticsearch.xpack.indexlifecycle.action.TransportDeleteLifecycleAction; -import org.elasticsearch.xpack.indexlifecycle.action.TransportExplainLifecycleAction; -import org.elasticsearch.xpack.indexlifecycle.action.TransportGetLifecycleAction; -import org.elasticsearch.xpack.indexlifecycle.action.TransportGetStatusAction; -import org.elasticsearch.xpack.indexlifecycle.action.TransportMoveToStepAction; -import org.elasticsearch.xpack.indexlifecycle.action.TransportPutLifecycleAction; -import org.elasticsearch.xpack.indexlifecycle.action.TransportRemoveIndexLifecyclePolicyAction; -import org.elasticsearch.xpack.indexlifecycle.action.TransportRetryAction; -import org.elasticsearch.xpack.indexlifecycle.action.TransportStartILMAction; -import org.elasticsearch.xpack.indexlifecycle.action.TransportStopILMAction; +import org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecycleMetadata; +import org.elasticsearch.xpack.core.snapshotlifecycle.action.DeleteSnapshotLifecycleAction; +import org.elasticsearch.xpack.core.snapshotlifecycle.action.ExecuteSnapshotLifecycleAction; +import org.elasticsearch.xpack.core.snapshotlifecycle.action.GetSnapshotLifecycleAction; +import org.elasticsearch.xpack.core.snapshotlifecycle.action.PutSnapshotLifecycleAction; +import org.elasticsearch.xpack.core.snapshotlifecycle.history.SnapshotHistoryStore; +import org.elasticsearch.xpack.core.snapshotlifecycle.history.SnapshotLifecycleTemplateRegistry; +import org.elasticsearch.xpack.ilm.action.RestDeleteLifecycleAction; +import org.elasticsearch.xpack.ilm.action.RestExplainLifecycleAction; +import org.elasticsearch.xpack.ilm.action.RestGetLifecycleAction; +import org.elasticsearch.xpack.ilm.action.RestGetStatusAction; +import org.elasticsearch.xpack.ilm.action.RestMoveToStepAction; +import 
org.elasticsearch.xpack.ilm.action.RestPutLifecycleAction; +import org.elasticsearch.xpack.ilm.action.RestRemoveIndexLifecyclePolicyAction; +import org.elasticsearch.xpack.ilm.action.RestRetryAction; +import org.elasticsearch.xpack.ilm.action.RestStartILMAction; +import org.elasticsearch.xpack.ilm.action.RestStopAction; +import org.elasticsearch.xpack.ilm.action.TransportDeleteLifecycleAction; +import org.elasticsearch.xpack.ilm.action.TransportExplainLifecycleAction; +import org.elasticsearch.xpack.ilm.action.TransportGetLifecycleAction; +import org.elasticsearch.xpack.ilm.action.TransportGetStatusAction; +import org.elasticsearch.xpack.ilm.action.TransportMoveToStepAction; +import org.elasticsearch.xpack.ilm.action.TransportPutLifecycleAction; +import org.elasticsearch.xpack.ilm.action.TransportRemoveIndexLifecyclePolicyAction; +import org.elasticsearch.xpack.ilm.action.TransportRetryAction; +import org.elasticsearch.xpack.ilm.action.TransportStartILMAction; +import org.elasticsearch.xpack.ilm.action.TransportStopILMAction; +import org.elasticsearch.xpack.slm.SnapshotLifecycleService; +import org.elasticsearch.xpack.slm.SnapshotLifecycleTask; +import org.elasticsearch.xpack.slm.action.RestDeleteSnapshotLifecycleAction; +import org.elasticsearch.xpack.slm.action.RestExecuteSnapshotLifecycleAction; +import org.elasticsearch.xpack.slm.action.RestGetSnapshotLifecycleAction; +import org.elasticsearch.xpack.slm.action.RestPutSnapshotLifecycleAction; +import org.elasticsearch.xpack.slm.action.TransportDeleteSnapshotLifecycleAction; +import org.elasticsearch.xpack.slm.action.TransportExecuteSnapshotLifecycleAction; +import org.elasticsearch.xpack.slm.action.TransportGetSnapshotLifecycleAction; +import org.elasticsearch.xpack.slm.action.TransportPutSnapshotLifecycleAction; +import java.io.IOException; import java.time.Clock; import java.util.Arrays; import java.util.Collection; @@ -90,6 +110,8 @@ public class IndexLifecycle extends Plugin implements ActionPlugin { private final SetOnce indexLifecycleInitialisationService = new SetOnce<>(); + private final SetOnce snapshotLifecycleService = new SetOnce<>(); + private final SetOnce snapshotHistoryStore = new SetOnce<>(); private Settings settings; private boolean enabled; @@ -109,7 +131,8 @@ public List> getSettings() { LifecycleSettings.LIFECYCLE_POLL_INTERVAL_SETTING, LifecycleSettings.LIFECYCLE_NAME_SETTING, LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE_SETTING, - RolloverAction.LIFECYCLE_ROLLOVER_ALIAS_SETTING); + RolloverAction.LIFECYCLE_ROLLOVER_ALIAS_SETTING, + LifecycleSettings.SLM_HISTORY_INDEX_ENABLED_SETTING); } @Override @@ -122,12 +145,17 @@ public Collection createComponents(Client client, ClusterService cluster } indexLifecycleInitialisationService.set(new IndexLifecycleService(settings, client, clusterService, threadPool, getClock(), System::currentTimeMillis, xContentRegistry)); - return Collections.singletonList(indexLifecycleInitialisationService.get()); + SnapshotLifecycleTemplateRegistry templateRegistry = new SnapshotLifecycleTemplateRegistry(settings, clusterService, threadPool, + client, xContentRegistry); + snapshotHistoryStore.set(new SnapshotHistoryStore(settings, client, getClock().getZone())); + snapshotLifecycleService.set(new SnapshotLifecycleService(settings, + () -> new SnapshotLifecycleTask(client, clusterService, snapshotHistoryStore.get()), clusterService, getClock())); + return Arrays.asList(indexLifecycleInitialisationService.get(), snapshotLifecycleService.get(), snapshotHistoryStore.get()); } @Override public 
List getNamedWriteables() { - return Arrays.asList(); + return Collections.emptyList(); } @Override @@ -136,6 +164,8 @@ public List getNa // Custom Metadata new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(IndexLifecycleMetadata.TYPE), parser -> IndexLifecycleMetadata.PARSER.parse(parser, null)), + new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(SnapshotLifecycleMetadata.TYPE), + parser -> SnapshotLifecycleMetadata.PARSER.parse(parser, null)), // Lifecycle Types new NamedXContentRegistry.Entry(LifecycleType.class, new ParseField(TimeseriesLifecycleType.TYPE), (p, c) -> TimeseriesLifecycleType.INSTANCE), @@ -169,7 +199,12 @@ public List getRestHandlers(Settings settings, RestController restC new RestRetryAction(settings, restController), new RestStopAction(settings, restController), new RestStartILMAction(settings, restController), - new RestGetStatusAction(settings, restController) + new RestGetStatusAction(settings, restController), + // Snapshot lifecycle actions + new RestPutSnapshotLifecycleAction(settings, restController), + new RestDeleteSnapshotLifecycleAction(settings, restController), + new RestGetSnapshotLifecycleAction(settings, restController), + new RestExecuteSnapshotLifecycleAction(settings, restController) ); } @@ -193,15 +228,21 @@ public List getRestHandlers(Settings settings, RestController restC new ActionHandler<>(StartILMAction.INSTANCE, TransportStartILMAction.class), new ActionHandler<>(StopILMAction.INSTANCE, TransportStopILMAction.class), new ActionHandler<>(GetStatusAction.INSTANCE, TransportGetStatusAction.class), + // Snapshot lifecycle actions + new ActionHandler<>(PutSnapshotLifecycleAction.INSTANCE, TransportPutSnapshotLifecycleAction.class), + new ActionHandler<>(DeleteSnapshotLifecycleAction.INSTANCE, TransportDeleteSnapshotLifecycleAction.class), + new ActionHandler<>(GetSnapshotLifecycleAction.INSTANCE, TransportGetSnapshotLifecycleAction.class), + new ActionHandler<>(ExecuteSnapshotLifecycleAction.INSTANCE, TransportExecuteSnapshotLifecycleAction.class), usageAction, infoAction); } @Override public void close() { - IndexLifecycleService lifecycleService = indexLifecycleInitialisationService.get(); - if (lifecycleService != null) { - lifecycleService.close(); + try { + IOUtils.close(indexLifecycleInitialisationService.get(), snapshotLifecycleService.get()); + } catch (IOException e) { + throw new ElasticsearchException("unable to close index lifecycle services", e); } } } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInfoTransportAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleInfoTransportAction.java similarity index 97% rename from x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInfoTransportAction.java rename to x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleInfoTransportAction.java index 65fa28458e759..8356bf1ebe5ab 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInfoTransportAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleInfoTransportAction.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.indexlifecycle; +package org.elasticsearch.xpack.ilm; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.common.inject.Inject; diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunner.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java similarity index 99% rename from x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunner.java rename to x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java index 05ad342f3e779..79e524d06a7a1 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunner.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.indexlifecycle; +package org.elasticsearch.xpack.ilm; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleService.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java similarity index 99% rename from x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleService.java rename to x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java index a159b9f965e2a..97b4bf5504e52 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleService.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.indexlifecycle; +package org.elasticsearch.xpack.ilm; import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.logging.log4j.LogManager; diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleUsageTransportAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleUsageTransportAction.java similarity index 98% rename from x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleUsageTransportAction.java rename to x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleUsageTransportAction.java index 91fd744d9aabb..9882ec9ef7a3b 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleUsageTransportAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleUsageTransportAction.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.indexlifecycle; +package org.elasticsearch.xpack.ilm; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/LifecyclePolicySecurityClient.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/LifecyclePolicySecurityClient.java similarity index 97% rename from x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/LifecyclePolicySecurityClient.java rename to x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/LifecyclePolicySecurityClient.java index e116223304e4e..5025dc99d76eb 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/LifecyclePolicySecurityClient.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/LifecyclePolicySecurityClient.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.indexlifecycle; +package org.elasticsearch.xpack.ilm; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionListener; diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/MoveToErrorStepUpdateTask.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/MoveToErrorStepUpdateTask.java similarity index 98% rename from x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/MoveToErrorStepUpdateTask.java rename to x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/MoveToErrorStepUpdateTask.java index 5af1a05309e7e..760ce44891e4b 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/MoveToErrorStepUpdateTask.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/MoveToErrorStepUpdateTask.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.indexlifecycle; +package org.elasticsearch.xpack.ilm; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.ClusterState; diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/MoveToNextStepUpdateTask.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/MoveToNextStepUpdateTask.java similarity index 98% rename from x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/MoveToNextStepUpdateTask.java rename to x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/MoveToNextStepUpdateTask.java index 246cda6192fab..303adf867dd23 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/MoveToNextStepUpdateTask.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/MoveToNextStepUpdateTask.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.indexlifecycle; +package org.elasticsearch.xpack.ilm; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/OperationModeUpdateTask.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/OperationModeUpdateTask.java similarity index 52% rename from x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/OperationModeUpdateTask.java rename to x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/OperationModeUpdateTask.java index 0cf24300831cd..4886b8fd75b93 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/OperationModeUpdateTask.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/OperationModeUpdateTask.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.indexlifecycle; +package org.elasticsearch.xpack.ilm; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -12,6 +12,7 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecycleMetadata; public class OperationModeUpdateTask extends ClusterStateUpdateTask { private static final Logger logger = LogManager.getLogger(OperationModeUpdateTask.class); @@ -27,6 +28,13 @@ OperationMode getOperationMode() { @Override public ClusterState execute(ClusterState currentState) { + ClusterState newState = currentState; + newState = updateILMState(newState); + newState = updateSLMState(newState); + return newState; + } + + private ClusterState updateILMState(final ClusterState currentState) { IndexLifecycleMetadata currentMetadata = currentState.metaData().custom(IndexLifecycleMetadata.TYPE); if (currentMetadata != null && currentMetadata.getOperationMode().isValidChange(mode) == false) { return currentState; @@ -41,12 +49,33 @@ public ClusterState execute(ClusterState currentState) { newMode = currentMetadata.getOperationMode(); } - ClusterState.Builder builder = new ClusterState.Builder(currentState); - MetaData.Builder metadataBuilder = MetaData.builder(currentState.metaData()); - metadataBuilder.putCustom(IndexLifecycleMetadata.TYPE, - new IndexLifecycleMetadata(currentMetadata.getPolicyMetadatas(), newMode)); - builder.metaData(metadataBuilder.build()); - return builder.build(); + return ClusterState.builder(currentState) + .metaData(MetaData.builder(currentState.metaData()) + .putCustom(IndexLifecycleMetadata.TYPE, + new IndexLifecycleMetadata(currentMetadata.getPolicyMetadatas(), newMode))) + .build(); + } + + private ClusterState updateSLMState(final ClusterState currentState) { + SnapshotLifecycleMetadata currentMetadata = currentState.metaData().custom(SnapshotLifecycleMetadata.TYPE); + if (currentMetadata != null && currentMetadata.getOperationMode().isValidChange(mode) == false) { + return currentState; + } else if (currentMetadata == null) { + currentMetadata = SnapshotLifecycleMetadata.EMPTY; + } + + final OperationMode newMode; + if (currentMetadata.getOperationMode().isValidChange(mode)) { + newMode = mode; + } else { + newMode = currentMetadata.getOperationMode(); + } + + return ClusterState.builder(currentState) 
+ .metaData(MetaData.builder(currentState.metaData()) + .putCustom(SnapshotLifecycleMetadata.TYPE, + new SnapshotLifecycleMetadata(currentMetadata.getSnapshotConfigurations(), newMode))) + .build(); } @Override diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/PolicyStepsRegistry.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/PolicyStepsRegistry.java similarity index 99% rename from x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/PolicyStepsRegistry.java rename to x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/PolicyStepsRegistry.java index b1c1785cb1cf6..0a6c529867735 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/PolicyStepsRegistry.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/PolicyStepsRegistry.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.indexlifecycle; +package org.elasticsearch.xpack.ilm; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/SetStepInfoUpdateTask.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/SetStepInfoUpdateTask.java similarity index 98% rename from x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/SetStepInfoUpdateTask.java rename to x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/SetStepInfoUpdateTask.java index 72c7aa81b9d77..ae04e5767b024 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/SetStepInfoUpdateTask.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/SetStepInfoUpdateTask.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.indexlifecycle; +package org.elasticsearch.xpack.ilm; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.ClusterState; diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/TimeValueSchedule.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/TimeValueSchedule.java similarity index 96% rename from x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/TimeValueSchedule.java rename to x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/TimeValueSchedule.java index 436f8637a0228..704a3071a870d 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/TimeValueSchedule.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/TimeValueSchedule.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.indexlifecycle; +package org.elasticsearch.xpack.ilm; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.xpack.core.scheduler.SchedulerEngine.Schedule; diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestDeleteLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestDeleteLifecycleAction.java similarity index 96% rename from x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestDeleteLifecycleAction.java rename to x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestDeleteLifecycleAction.java index 081e7d1565f79..3a65cbec9b358 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestDeleteLifecycleAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestDeleteLifecycleAction.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.indexlifecycle.action; +package org.elasticsearch.xpack.ilm.action; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.settings.Settings; diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestExplainLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestExplainLifecycleAction.java similarity index 97% rename from x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestExplainLifecycleAction.java rename to x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestExplainLifecycleAction.java index 96be5f0fc0337..2d6a451b6889a 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestExplainLifecycleAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestExplainLifecycleAction.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.indexlifecycle.action; +package org.elasticsearch.xpack.ilm.action; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestGetLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestGetLifecycleAction.java similarity index 97% rename from x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestGetLifecycleAction.java rename to x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestGetLifecycleAction.java index b518fe2f08698..f1237d42fe06a 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestGetLifecycleAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestGetLifecycleAction.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.indexlifecycle.action; +package org.elasticsearch.xpack.ilm.action; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestGetStatusAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestGetStatusAction.java similarity index 96% rename from x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestGetStatusAction.java rename to x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestGetStatusAction.java index be2d16ee0be76..a262215d7201f 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestGetStatusAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestGetStatusAction.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.indexlifecycle.action; +package org.elasticsearch.xpack.ilm.action; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.settings.Settings; diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestMoveToStepAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestMoveToStepAction.java similarity index 96% rename from x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestMoveToStepAction.java rename to x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestMoveToStepAction.java index 41228041679e7..63b4f068a67bd 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestMoveToStepAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestMoveToStepAction.java @@ -5,7 +5,7 @@ * */ -package org.elasticsearch.xpack.indexlifecycle.action; +package org.elasticsearch.xpack.ilm.action; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.settings.Settings; diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestPutLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestPutLifecycleAction.java similarity index 97% rename from x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestPutLifecycleAction.java rename to x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestPutLifecycleAction.java index aad85426fc338..583c23eaf9a24 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestPutLifecycleAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestPutLifecycleAction.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.indexlifecycle.action; +package org.elasticsearch.xpack.ilm.action; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.settings.Settings; diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestRemoveIndexLifecyclePolicyAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestRemoveIndexLifecyclePolicyAction.java similarity index 97% rename from x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestRemoveIndexLifecyclePolicyAction.java rename to x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestRemoveIndexLifecyclePolicyAction.java index d077b732341ca..e4e907072c8cb 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestRemoveIndexLifecyclePolicyAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestRemoveIndexLifecyclePolicyAction.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.indexlifecycle.action; +package org.elasticsearch.xpack.ilm.action; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestRetryAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestRetryAction.java similarity index 96% rename from x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestRetryAction.java rename to x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestRetryAction.java index 9e12c3cc34ed7..57f53759a0104 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestRetryAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestRetryAction.java @@ -5,7 +5,7 @@ * */ -package org.elasticsearch.xpack.indexlifecycle.action; +package org.elasticsearch.xpack.ilm.action; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestStartILMAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestStartILMAction.java similarity index 96% rename from x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestStartILMAction.java rename to x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestStartILMAction.java index 84f46a30406fd..28a3127c3900b 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestStartILMAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestStartILMAction.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.indexlifecycle.action; +package org.elasticsearch.xpack.ilm.action; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.settings.Settings; diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestStopAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestStopAction.java similarity index 96% rename from x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestStopAction.java rename to x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestStopAction.java index 2f8d3c5e43037..117d9df4aeedf 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestStopAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestStopAction.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.indexlifecycle.action; +package org.elasticsearch.xpack.ilm.action; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.settings.Settings; diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportDeleteLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportDeleteLifecycleAction.java similarity index 95% rename from x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportDeleteLifecycleAction.java rename to x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportDeleteLifecycleAction.java index c35e3495c05a5..0b6c6b3a65d4e 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportDeleteLifecycleAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportDeleteLifecycleAction.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.indexlifecycle.action; +package org.elasticsearch.xpack.ilm.action; import com.carrotsearch.hppc.cursors.ObjectCursor; import org.elasticsearch.ResourceNotFoundException; @@ -46,7 +46,7 @@ public class TransportDeleteLifecycleAction extends TransportMasterNodeAction { @Inject public TransportGetStatusAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { super(GetStatusAction.NAME, transportService, clusterService, threadPool, actionFilters, - indexNameExpressionResolver, Request::new); + Request::new, indexNameExpressionResolver); } @Override @@ -39,8 +42,8 @@ protected String executor() { } @Override - protected Response newResponse() { - return new Response(); + protected Response read(StreamInput in) throws IOException { + return new Response(in); } @Override diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportMoveToStepAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportMoveToStepAction.java similarity index 92% rename from x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportMoveToStepAction.java rename to x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportMoveToStepAction.java index 782baa2828b63..3cf10d632cb8d 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportMoveToStepAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportMoveToStepAction.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.indexlifecycle.action; +package org.elasticsearch.xpack.ilm.action; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -26,7 +26,7 @@ import org.elasticsearch.xpack.core.indexlifecycle.action.MoveToStepAction; import org.elasticsearch.xpack.core.indexlifecycle.action.MoveToStepAction.Request; import org.elasticsearch.xpack.core.indexlifecycle.action.MoveToStepAction.Response; -import org.elasticsearch.xpack.indexlifecycle.IndexLifecycleService; +import org.elasticsearch.xpack.ilm.IndexLifecycleService; import java.io.IOException; @@ -38,8 +38,8 @@ public class TransportMoveToStepAction extends TransportMasterNodeAction listener) { clusterService.submitStateUpdateTask("ilm_operation_mode_update", diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportStopILMAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportStopILMAction.java similarity index 88% rename from x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportStopILMAction.java rename to x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportStopILMAction.java index 2ffa519c9c183..76fde6e42cbea 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportStopILMAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportStopILMAction.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.indexlifecycle.action; +package org.elasticsearch.xpack.ilm.action; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; @@ -24,7 +24,7 @@ import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; import org.elasticsearch.xpack.core.indexlifecycle.StopILMRequest; import org.elasticsearch.xpack.core.indexlifecycle.action.StopILMAction; -import org.elasticsearch.xpack.indexlifecycle.OperationModeUpdateTask; +import org.elasticsearch.xpack.ilm.OperationModeUpdateTask; import java.io.IOException; @@ -33,8 +33,8 @@ public class TransportStopILMAction extends TransportMasterNodeAction listener) { clusterService.submitStateUpdateTask("ilm_operation_mode_update", diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleService.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleService.java new file mode 100644 index 0000000000000..3a30a6b1827f7 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleService.java @@ -0,0 +1,224 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.slm; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.LocalNodeMasterListener; +import org.elasticsearch.cluster.metadata.RepositoriesMetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; +import org.elasticsearch.xpack.core.scheduler.CronSchedule; +import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; +import org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecycleMetadata; +import org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecyclePolicy; +import org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecyclePolicyMetadata; + +import java.io.Closeable; +import java.time.Clock; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.function.Supplier; +import java.util.regex.Pattern; +import java.util.stream.Collectors; + +/** + * {@code SnapshotLifecycleService} manages snapshot policy scheduling and triggering of the + * {@link SnapshotLifecycleTask}. It reacts to new policies in the cluster state by scheduling a + * task according to the policy's schedule. 
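+ * + * <p>A minimal construction sketch (not part of the original source; {@code settings}, {@code client}, + * {@code clusterService}, and {@code historyStore} are assumed to be provided by the owning plugin): + * <pre>{@code + * SnapshotLifecycleService service = new SnapshotLifecycleService(settings, + *     () -> new SnapshotLifecycleTask(client, clusterService, historyStore), + *     clusterService, Clock.systemUTC()); + * }</pre>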
+ */ +public class SnapshotLifecycleService implements LocalNodeMasterListener, Closeable, ClusterStateListener { + + private static final Logger logger = LogManager.getLogger(SnapshotLifecycleService.class); + private static final String JOB_PATTERN_SUFFIX = "-\\d+$"; + + private final SchedulerEngine scheduler; + private final ClusterService clusterService; + private final SnapshotLifecycleTask snapshotTask; + private final Map<String, SchedulerEngine.Job> scheduledTasks = ConcurrentCollections.newConcurrentMap(); + private volatile boolean isMaster = false; + + public SnapshotLifecycleService(Settings settings, + Supplier<SnapshotLifecycleTask> taskSupplier, + ClusterService clusterService, + Clock clock) { + this.scheduler = new SchedulerEngine(settings, clock); + this.clusterService = clusterService; + this.snapshotTask = taskSupplier.get(); + clusterService.addLocalNodeMasterListener(this); // TODO: change this not to use 'this' + clusterService.addListener(this); + } + + @Override + public void clusterChanged(final ClusterChangedEvent event) { + if (this.isMaster) { + final ClusterState state = event.state(); + + if (ilmStoppedOrStopping(state)) { + if (scheduler.scheduledJobIds().size() > 0) { + cancelSnapshotJobs(); + } + return; + } + + scheduleSnapshotJobs(state); + cleanupDeletedPolicies(state); + } + } + + @Override + public void onMaster() { + this.isMaster = true; + scheduler.register(snapshotTask); + final ClusterState state = clusterService.state(); + if (ilmStoppedOrStopping(state)) { + // ILM is currently stopped, so don't schedule jobs + return; + } + scheduleSnapshotJobs(state); + } + + @Override + public void offMaster() { + this.isMaster = false; + scheduler.unregister(snapshotTask); + cancelSnapshotJobs(); + } + + // Only used for testing + SchedulerEngine getScheduler() { + return this.scheduler; + } + + /** + * Returns true if ILM is in the stopped or stopping state + */ + private static boolean ilmStoppedOrStopping(ClusterState state) { + return Optional.ofNullable((SnapshotLifecycleMetadata) state.metaData().custom(SnapshotLifecycleMetadata.TYPE)) + .map(SnapshotLifecycleMetadata::getOperationMode) + .map(mode -> OperationMode.STOPPING == mode || OperationMode.STOPPED == mode) + .orElse(false); + } + + /** + * Schedule all non-scheduled snapshot jobs contained in the cluster state + */ + public void scheduleSnapshotJobs(final ClusterState state) { + SnapshotLifecycleMetadata snapMeta = state.metaData().custom(SnapshotLifecycleMetadata.TYPE); + if (snapMeta != null) { + snapMeta.getSnapshotConfigurations().values().forEach(this::maybeScheduleSnapshot); + } + } + + public void cleanupDeletedPolicies(final ClusterState state) { + SnapshotLifecycleMetadata snapMeta = state.metaData().custom(SnapshotLifecycleMetadata.TYPE); + if (snapMeta != null) { + // Retrieve all of the expected policy job ids from the policies in the metadata + final Set<String> policyJobIds = snapMeta.getSnapshotConfigurations().values().stream() + .map(SnapshotLifecycleService::getJobId) + .collect(Collectors.toSet()); + + // Cancel any scheduled job whose id does *NOT* match one of the expected policy job ids + scheduledTasks.keySet().stream() + .filter(jobId -> policyJobIds.contains(jobId) == false) + .forEach(this::cancelScheduledSnapshot); + } + } + + /** + * Schedule the {@link SnapshotLifecyclePolicy} job if it does not already exist. First checks + * to see if any previous versions of the policy were scheduled, and if so, cancels those. If + * the same version of a policy has already been scheduled, it does not overwrite the job.
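+ * + * <p>A worked example of the resulting job lifecycle (the policy name {@code nightly} is hypothetical): + * <pre>{@code + * // version 1 of policy "nightly" is scheduled under the job id "nightly-1" + * // updating the policy bumps it to version 2: "nightly-1" is cancelled and "nightly-2" is scheduled + * // a cluster state update that leaves the policy untouched does not reschedule "nightly-2" + * }</pre>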
+ */ + public void maybeScheduleSnapshot(final SnapshotLifecyclePolicyMetadata snapshotLifecyclePolicy) { + final String jobId = getJobId(snapshotLifecyclePolicy); + final Pattern existingJobPattern = Pattern.compile(snapshotLifecyclePolicy.getPolicy().getId() + JOB_PATTERN_SUFFIX); + + // Find and cancel any existing jobs for this policy + final boolean existingJobsFoundAndCancelled = scheduledTasks.keySet().stream() + // Find all jobs matching the `<policy-id>-\d+` pattern + .filter(jId -> existingJobPattern.matcher(jId).matches()) + // Filter out a job that has not been changed (matches the id exactly, meaning the version is the same) + .filter(jId -> jId.equals(jobId) == false) + .map(existingJobId -> { + // Cancel the existing job so the new one can be scheduled + logger.debug("removing existing snapshot lifecycle job [{}] as it has been updated", existingJobId); + scheduledTasks.remove(existingJobId); + boolean existed = scheduler.remove(existingJobId); + assert existed : "expected job for " + existingJobId + " to exist in scheduler"; + return existed; + }) + .reduce(false, (a, b) -> a || b); + + // Now atomically schedule the new job and add it to the scheduled tasks map. If the jobId + // is identical to an existing job (meaning the version has not changed) then this does + // not reschedule it. + scheduledTasks.computeIfAbsent(jobId, id -> { + final SchedulerEngine.Job job = new SchedulerEngine.Job(jobId, + new CronSchedule(snapshotLifecyclePolicy.getPolicy().getSchedule())); + if (existingJobsFoundAndCancelled) { + logger.info("rescheduling updated snapshot lifecycle job [{}]", jobId); + } else { + logger.info("scheduling snapshot lifecycle job [{}]", jobId); + } + scheduler.add(job); + return job; + }); + } + + /** + * Generate the job id for a given policy metadata.
The job id is {@code <policy-id>-<version>} + */ + public static String getJobId(SnapshotLifecyclePolicyMetadata policyMeta) { + return policyMeta.getPolicy().getId() + "-" + policyMeta.getVersion(); + } + + /** + * Cancel all scheduled snapshot jobs + */ + public void cancelSnapshotJobs() { + logger.trace("cancelling all snapshot lifecycle jobs"); + scheduler.scheduledJobIds().forEach(scheduler::remove); + scheduledTasks.clear(); + } + + /** + * Cancel the given policy job id (from {@link #getJobId(SnapshotLifecyclePolicyMetadata)}) + */ + public void cancelScheduledSnapshot(final String lifecycleJobId) { + logger.debug("cancelling snapshot lifecycle job [{}] as it no longer exists", lifecycleJobId); + scheduledTasks.remove(lifecycleJobId); + scheduler.remove(lifecycleJobId); + } + + /** + * Validates that the {@code repository} exists as a registered snapshot repository + * @throws IllegalArgumentException if the repository does not exist + */ + public static void validateRepositoryExists(final String repository, final ClusterState state) { + Optional.ofNullable((RepositoriesMetaData) state.metaData().custom(RepositoriesMetaData.TYPE)) + .map(repoMeta -> repoMeta.repository(repository)) + .orElseThrow(() -> new IllegalArgumentException("no such repository [" + repository + "]")); + } + + @Override + public String executorName() { + return ThreadPool.Names.SNAPSHOT; + } + + @Override + public void close() { + this.scheduler.stop(); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java new file mode 100644 index 0000000000000..b15a5d46145c1 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java @@ -0,0 +1,217 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ + +package org.elasticsearch.xpack.slm; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.core.ClientHelper; +import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; +import org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotInvocationRecord; +import org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecycleMetadata; +import org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.snapshotlifecycle.history.SnapshotHistoryItem; +import org.elasticsearch.xpack.core.snapshotlifecycle.history.SnapshotHistoryStore; +import org.elasticsearch.xpack.ilm.LifecyclePolicySecurityClient; + +import java.io.IOException; +import java.time.Instant; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; + +import static org.elasticsearch.ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE; + +public class SnapshotLifecycleTask implements SchedulerEngine.Listener { + + private static Logger logger = LogManager.getLogger(SnapshotLifecycleTask.class); + + private final Client client; + private final ClusterService clusterService; + private final SnapshotHistoryStore historyStore; + + public SnapshotLifecycleTask(final Client client, final ClusterService clusterService, final SnapshotHistoryStore historyStore) { + this.client = client; + this.clusterService = clusterService; + this.historyStore = historyStore; + } + + @Override + public void triggered(SchedulerEngine.Event event) { + logger.debug("snapshot lifecycle policy task triggered from job [{}]", event.getJobName()); + + final Optional snapshotName = maybeTakeSnapshot(event.getJobName(), client, clusterService, historyStore); + + // Would be cleaner if we could use Optional#ifPresentOrElse + snapshotName.ifPresent(name -> + logger.info("snapshot lifecycle policy job [{}] issued new snapshot creation for [{}] successfully", + event.getJobName(), name)); + + if (snapshotName.isPresent() == false) { + logger.warn("snapshot lifecycle policy for job [{}] no longer exists, snapshot not created", event.getJobName()); + } + } + + /** + * For the given job id (a combination of policy id and version), issue a create snapshot + * request. 
Whether the create snapshot request succeeds or fails to be issued, the result is recorded in the policy's + * metadata in the cluster state + * @return An optional snapshot name if the request was issued successfully + */ + public static Optional<String> maybeTakeSnapshot(final String jobId, final Client client, final ClusterService clusterService, + final SnapshotHistoryStore historyStore) { + Optional<SnapshotLifecyclePolicyMetadata> maybeMetadata = getSnapPolicyMetadata(jobId, clusterService.state()); + String snapshotName = maybeMetadata.map(policyMetadata -> { + CreateSnapshotRequest request = policyMetadata.getPolicy().toRequest(); + final LifecyclePolicySecurityClient clientWithHeaders = new LifecyclePolicySecurityClient(client, + ClientHelper.INDEX_LIFECYCLE_ORIGIN, policyMetadata.getHeaders()); + logger.info("snapshot lifecycle policy [{}] issuing create snapshot [{}]", + policyMetadata.getPolicy().getId(), request.snapshot()); + clientWithHeaders.admin().cluster().createSnapshot(request, new ActionListener<>() { + @Override + public void onResponse(CreateSnapshotResponse createSnapshotResponse) { + logger.debug("snapshot response for [{}]: {}", + policyMetadata.getPolicy().getId(), Strings.toString(createSnapshotResponse)); + final long timestamp = Instant.now().toEpochMilli(); + clusterService.submitStateUpdateTask("slm-record-success-" + policyMetadata.getPolicy().getId(), + WriteJobStatus.success(policyMetadata.getPolicy().getId(), request.snapshot(), timestamp)); + historyStore.putAsync(SnapshotHistoryItem.successRecord(timestamp, policyMetadata.getPolicy(), request.snapshot())); + } + + @Override + public void onFailure(Exception e) { + logger.error("failed to issue create snapshot request for snapshot lifecycle policy [{}]: {}", + policyMetadata.getPolicy().getId(), e); + final long timestamp = Instant.now().toEpochMilli(); + clusterService.submitStateUpdateTask("slm-record-failure-" + policyMetadata.getPolicy().getId(), + WriteJobStatus.failure(policyMetadata.getPolicy().getId(), request.snapshot(), timestamp, e)); + final SnapshotHistoryItem failureRecord; + try { + failureRecord = SnapshotHistoryItem.failureRecord(timestamp, policyMetadata.getPolicy(), request.snapshot(), e); + historyStore.putAsync(failureRecord); + } catch (IOException ex) { + // This should only happen if serializing the original exception fails, which should not happen + logger.error(new ParameterizedMessage( + "failed to record snapshot creation failure for snapshot lifecycle policy [{}]", + policyMetadata.getPolicy().getId()), e); + } + } + }); + return request.snapshot(); + }).orElse(null); + + return Optional.ofNullable(snapshotName); + } + + /** + * For the given job id, return an optional policy metadata object, if one exists + */ + static Optional<SnapshotLifecyclePolicyMetadata> getSnapPolicyMetadata(final String jobId, final ClusterState state) { + return Optional.ofNullable((SnapshotLifecycleMetadata) state.metaData().custom(SnapshotLifecycleMetadata.TYPE)) + .map(SnapshotLifecycleMetadata::getSnapshotConfigurations) + .flatMap(configMap -> configMap.values().stream() + .filter(policyMeta -> jobId.equals(SnapshotLifecycleService.getJobId(policyMeta))) + .findFirst()); + } + + /** + * A cluster state update task to write the result of a snapshot job to the cluster metadata for the associated policy.
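+ * + * <p>Submission mirrors the calls made from {@code maybeTakeSnapshot} above; on success, for example + * (the {@code policyId}, {@code snapshotName}, and {@code timestamp} locals stand in for the values computed there): + * <pre>{@code + * clusterService.submitStateUpdateTask("slm-record-success-" + policyId, + *     WriteJobStatus.success(policyId, snapshotName, timestamp)); + * }</pre>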
+ */ + private static class WriteJobStatus extends ClusterStateUpdateTask { + private static final ToXContent.Params STACKTRACE_PARAMS = + new ToXContent.MapParams(Collections.singletonMap(REST_EXCEPTION_SKIP_STACK_TRACE, "false")); + + private final String policyName; + private final String snapshotName; + private final long timestamp; + private final Optional<Exception> exception; + + private WriteJobStatus(String policyName, String snapshotName, long timestamp, Optional<Exception> exception) { + this.policyName = policyName; + this.snapshotName = snapshotName; + this.exception = exception; + this.timestamp = timestamp; + } + + static WriteJobStatus success(String policyId, String snapshotName, long timestamp) { + return new WriteJobStatus(policyId, snapshotName, timestamp, Optional.empty()); + } + + static WriteJobStatus failure(String policyId, String snapshotName, long timestamp, Exception exception) { + return new WriteJobStatus(policyId, snapshotName, timestamp, Optional.of(exception)); + } + + private String exceptionToString() throws IOException { + if (exception.isPresent()) { + try (XContentBuilder causeXContentBuilder = JsonXContent.contentBuilder()) { + causeXContentBuilder.startObject(); + ElasticsearchException.generateThrowableXContent(causeXContentBuilder, STACKTRACE_PARAMS, exception.get()); + causeXContentBuilder.endObject(); + return BytesReference.bytes(causeXContentBuilder).utf8ToString(); + } + } + return null; + } + + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + SnapshotLifecycleMetadata snapMeta = currentState.metaData().custom(SnapshotLifecycleMetadata.TYPE); + + assert snapMeta != null : "this should never be called while the snapshot lifecycle cluster metadata is null"; + if (snapMeta == null) { + logger.error("failed to record snapshot [{}] for snapshot [{}] in policy [{}]: snapshot lifecycle metadata is null", + exception.isPresent() ? "failure" : "success", snapshotName, policyName); + return currentState; + } + + Map<String, SnapshotLifecyclePolicyMetadata> snapLifecycles = new HashMap<>(snapMeta.getSnapshotConfigurations()); + SnapshotLifecyclePolicyMetadata policyMetadata = snapLifecycles.get(policyName); + if (policyMetadata == null) { + logger.warn("failed to record snapshot [{}] for snapshot [{}] in policy [{}]: policy not found", + exception.isPresent() ?
"failure" : "success", snapshotName, policyName); + return currentState; + } + + SnapshotLifecyclePolicyMetadata.Builder newPolicyMetadata = SnapshotLifecyclePolicyMetadata.builder(policyMetadata); + + if (exception.isPresent()) { + newPolicyMetadata.setLastFailure(new SnapshotInvocationRecord(snapshotName, timestamp, exceptionToString())); + } else { + newPolicyMetadata.setLastSuccess(new SnapshotInvocationRecord(snapshotName, timestamp, null)); + } + + snapLifecycles.put(policyName, newPolicyMetadata.build()); + SnapshotLifecycleMetadata lifecycleMetadata = new SnapshotLifecycleMetadata(snapLifecycles, snapMeta.getOperationMode()); + MetaData currentMeta = currentState.metaData(); + return ClusterState.builder(currentState) + .metaData(MetaData.builder(currentMeta) + .putCustom(SnapshotLifecycleMetadata.TYPE, lifecycleMetadata)) + .build(); + } + + @Override + public void onFailure(String source, Exception e) { + logger.error("failed to record snapshot policy execution status for snapshot [{}] in policy [{}], (source: [{}]): {}", + snapshotName, policyName, source, e); + } + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/RestDeleteSnapshotLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/RestDeleteSnapshotLifecycleAction.java new file mode 100644 index 0000000000000..19634a4a72f27 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/RestDeleteSnapshotLifecycleAction.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.slm.action; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.snapshotlifecycle.action.DeleteSnapshotLifecycleAction; + +public class RestDeleteSnapshotLifecycleAction extends BaseRestHandler { + + public RestDeleteSnapshotLifecycleAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.DELETE, "/_slm/policy/{name}", this); + } + + @Override + public String getName() { + return "slm_delete_lifecycle"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { + String lifecycleId = request.param("name"); + DeleteSnapshotLifecycleAction.Request req = new DeleteSnapshotLifecycleAction.Request(lifecycleId); + req.timeout(request.paramAsTime("timeout", req.timeout())); + req.masterNodeTimeout(request.paramAsTime("master_timeout", req.masterNodeTimeout())); + + return channel -> client.execute(DeleteSnapshotLifecycleAction.INSTANCE, req, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/RestExecuteSnapshotLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/RestExecuteSnapshotLifecycleAction.java new file mode 100644 index 0000000000000..779f0a36daf3e --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/RestExecuteSnapshotLifecycleAction.java @@ -0,0 +1,39 @@ +/* + * Copyright 
Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.slm.action; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.snapshotlifecycle.action.ExecuteSnapshotLifecycleAction; + +import java.io.IOException; + +public class RestExecuteSnapshotLifecycleAction extends BaseRestHandler { + + public RestExecuteSnapshotLifecycleAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.PUT, "/_slm/policy/{name}/_execute", this); + } + + @Override + public String getName() { + return "slm_execute_lifecycle"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + String snapLifecycleId = request.param("name"); + ExecuteSnapshotLifecycleAction.Request req = new ExecuteSnapshotLifecycleAction.Request(snapLifecycleId); + req.timeout(request.paramAsTime("timeout", req.timeout())); + req.masterNodeTimeout(request.paramAsTime("master_timeout", req.masterNodeTimeout())); + return channel -> client.execute(ExecuteSnapshotLifecycleAction.INSTANCE, req, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSnapshotLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSnapshotLifecycleAction.java new file mode 100644 index 0000000000000..8140ffb1b156b --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSnapshotLifecycleAction.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.slm.action; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.snapshotlifecycle.action.GetSnapshotLifecycleAction; + +public class RestGetSnapshotLifecycleAction extends BaseRestHandler { + + public RestGetSnapshotLifecycleAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.GET, "/_slm/policy", this); + controller.registerHandler(RestRequest.Method.GET, "/_slm/policy/{name}", this); + } + + @Override + public String getName() { + return "slm_get_lifecycle"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { + String[] lifecycleNames = Strings.splitStringByCommaToArray(request.param("name")); + GetSnapshotLifecycleAction.Request req = new GetSnapshotLifecycleAction.Request(lifecycleNames); + req.timeout(request.paramAsTime("timeout", req.timeout())); + req.masterNodeTimeout(request.paramAsTime("master_timeout", req.masterNodeTimeout())); + + return channel -> client.execute(GetSnapshotLifecycleAction.INSTANCE, req, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/RestPutSnapshotLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/RestPutSnapshotLifecycleAction.java new file mode 100644 index 0000000000000..5e363dc0b6267 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/RestPutSnapshotLifecycleAction.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.slm.action; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.snapshotlifecycle.action.PutSnapshotLifecycleAction; + +import java.io.IOException; + +public class RestPutSnapshotLifecycleAction extends BaseRestHandler { + + public RestPutSnapshotLifecycleAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.PUT, "/_slm/policy/{name}", this); + } + + @Override + public String getName() { + return "slm_put_lifecycle"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + String snapLifecycleName = request.param("name"); + try (XContentParser parser = request.contentParser()) { + PutSnapshotLifecycleAction.Request req = PutSnapshotLifecycleAction.Request.parseRequest(snapLifecycleName, parser); + req.timeout(request.paramAsTime("timeout", req.timeout())); + req.masterNodeTimeout(request.paramAsTime("master_timeout", req.masterNodeTimeout())); + return channel -> client.execute(PutSnapshotLifecycleAction.INSTANCE, req, new RestToXContentListener<>(channel)); + } + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportDeleteSnapshotLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportDeleteSnapshotLifecycleAction.java new file mode 100644 index 0000000000000..a851550277895 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportDeleteSnapshotLifecycleAction.java @@ -0,0 +1,95 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.slm.action; + +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.AckedClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecycleMetadata; +import org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.snapshotlifecycle.action.DeleteSnapshotLifecycleAction; + +import java.io.IOException; +import java.util.Map; +import java.util.stream.Collectors; + +public class TransportDeleteSnapshotLifecycleAction extends + TransportMasterNodeAction { + + @Inject + public TransportDeleteSnapshotLifecycleAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + super(DeleteSnapshotLifecycleAction.NAME, transportService, clusterService, threadPool, actionFilters, + DeleteSnapshotLifecycleAction.Request::new, indexNameExpressionResolver); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected DeleteSnapshotLifecycleAction.Response read(StreamInput in) throws IOException { + return new DeleteSnapshotLifecycleAction.Response(in); + } + + @Override + protected void masterOperation(Task task, DeleteSnapshotLifecycleAction.Request request, + ClusterState state, + ActionListener listener) throws Exception { + clusterService.submitStateUpdateTask("delete-snapshot-lifecycle-" + request.getLifecycleId(), + new AckedClusterStateUpdateTask(request, listener) { + @Override + protected DeleteSnapshotLifecycleAction.Response newResponse(boolean acknowledged) { + return new DeleteSnapshotLifecycleAction.Response(acknowledged); + } + + @Override + public ClusterState execute(ClusterState currentState) { + SnapshotLifecycleMetadata snapMeta = currentState.metaData().custom(SnapshotLifecycleMetadata.TYPE); + if (snapMeta == null) { + throw new ResourceNotFoundException("snapshot lifecycle policy not found: {}", request.getLifecycleId()); + } + // Check that the policy exists in the first place + snapMeta.getSnapshotConfigurations().entrySet().stream() + .filter(e -> e.getValue().getPolicy().getId().equals(request.getLifecycleId())) + .findAny() + .orElseThrow(() -> new ResourceNotFoundException("snapshot lifecycle policy not found: {}", + request.getLifecycleId())); + + Map newConfigs = snapMeta.getSnapshotConfigurations().entrySet().stream() + .filter(e -> e.getKey().equals(request.getLifecycleId()) == false) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + + MetaData metaData = currentState.metaData(); + return ClusterState.builder(currentState) + 
.metaData(MetaData.builder(metaData) + .putCustom(SnapshotLifecycleMetadata.TYPE, + new SnapshotLifecycleMetadata(newConfigs, snapMeta.getOperationMode()))) + .build(); + } + }); + } + + @Override + protected ClusterBlockException checkBlock(DeleteSnapshotLifecycleAction.Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportExecuteSnapshotLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportExecuteSnapshotLifecycleAction.java new file mode 100644 index 0000000000000..61a3e24e753d2 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportExecuteSnapshotLifecycleAction.java @@ -0,0 +1,97 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.slm.action; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecycleMetadata; +import org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.snapshotlifecycle.action.ExecuteSnapshotLifecycleAction; +import org.elasticsearch.xpack.core.snapshotlifecycle.history.SnapshotHistoryStore; +import org.elasticsearch.xpack.slm.SnapshotLifecycleService; +import org.elasticsearch.xpack.slm.SnapshotLifecycleTask; + +import java.io.IOException; +import java.util.Optional; + +public class TransportExecuteSnapshotLifecycleAction + extends TransportMasterNodeAction { + + private static final Logger logger = LogManager.getLogger(TransportExecuteSnapshotLifecycleAction.class); + + private final Client client; + private final SnapshotHistoryStore historyStore; + + @Inject + public TransportExecuteSnapshotLifecycleAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + Client client, SnapshotHistoryStore historyStore) { + super(ExecuteSnapshotLifecycleAction.NAME, transportService, clusterService, threadPool, actionFilters, + ExecuteSnapshotLifecycleAction.Request::new, indexNameExpressionResolver); + this.client = client; + this.historyStore = historyStore; + } + @Override + protected String executor() { + return ThreadPool.Names.SNAPSHOT; + } + + @Override + protected ExecuteSnapshotLifecycleAction.Response 
read(StreamInput in) throws IOException { + return new ExecuteSnapshotLifecycleAction.Response(in); + } + + @Override + protected void masterOperation(final Task task, final ExecuteSnapshotLifecycleAction.Request request, + final ClusterState state, + final ActionListener listener) { + try { + final String policyId = request.getLifecycleId(); + SnapshotLifecycleMetadata snapMeta = state.metaData().custom(SnapshotLifecycleMetadata.TYPE); + if (snapMeta == null) { + listener.onFailure(new IllegalArgumentException("no such snapshot lifecycle policy [" + policyId + "]")); + return; + } + + SnapshotLifecyclePolicyMetadata policyMetadata = snapMeta.getSnapshotConfigurations().get(policyId); + if (policyMetadata == null) { + listener.onFailure(new IllegalArgumentException("no such snapshot lifecycle policy [" + policyId + "]")); + return; + } + + final Optional snapshotName = SnapshotLifecycleTask.maybeTakeSnapshot(SnapshotLifecycleService.getJobId(policyMetadata), + client, clusterService, historyStore); + if (snapshotName.isPresent()) { + listener.onResponse(new ExecuteSnapshotLifecycleAction.Response(snapshotName.get())); + } else { + listener.onFailure(new ElasticsearchException("failed to execute snapshot lifecycle policy [" + policyId + "]")); + } + } catch (Exception e) { + listener.onFailure(e); + } + } + + @Override + protected ClusterBlockException checkBlock(ExecuteSnapshotLifecycleAction.Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportGetSnapshotLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportGetSnapshotLifecycleAction.java new file mode 100644 index 0000000000000..39ab43b6fbf1d --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportGetSnapshotLifecycleAction.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.slm.action; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecycleMetadata; +import org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecyclePolicyItem; +import org.elasticsearch.xpack.core.snapshotlifecycle.action.GetSnapshotLifecycleAction; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; + +public class TransportGetSnapshotLifecycleAction extends + TransportMasterNodeAction<GetSnapshotLifecycleAction.Request, GetSnapshotLifecycleAction.Response> { + + private static final Logger logger = LogManager.getLogger(TransportGetSnapshotLifecycleAction.class); + + @Inject + public TransportGetSnapshotLifecycleAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + super(GetSnapshotLifecycleAction.NAME, transportService, clusterService, threadPool, actionFilters, + GetSnapshotLifecycleAction.Request::new, indexNameExpressionResolver); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected GetSnapshotLifecycleAction.Response read(StreamInput in) throws IOException { + return new GetSnapshotLifecycleAction.Response(in); + } + + @Override + protected void masterOperation(final Task task, final GetSnapshotLifecycleAction.Request request, + final ClusterState state, + final ActionListener<GetSnapshotLifecycleAction.Response> listener) { + SnapshotLifecycleMetadata snapMeta = state.metaData().custom(SnapshotLifecycleMetadata.TYPE); + if (snapMeta == null) { + listener.onResponse(new GetSnapshotLifecycleAction.Response(Collections.emptyList())); + } else { + final Set<String> ids = new HashSet<>(Arrays.asList(request.getLifecycleIds())); + List<SnapshotLifecyclePolicyItem> lifecycles = snapMeta.getSnapshotConfigurations() + .values() + .stream() + .filter(meta -> { + if (ids.isEmpty()) { + return true; + } else { + return ids.contains(meta.getPolicy().getId()); + } + }) + .map(SnapshotLifecyclePolicyItem::new) + .collect(Collectors.toList()); + listener.onResponse(new GetSnapshotLifecycleAction.Response(lifecycles)); + } + } + + @Override + protected ClusterBlockException checkBlock(GetSnapshotLifecycleAction.Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportPutSnapshotLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportPutSnapshotLifecycleAction.java new file mode 100644 index 0000000000000..e7648784aec54 --- /dev/null +++
b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportPutSnapshotLifecycleAction.java @@ -0,0 +1,134 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.slm.action; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.AckedClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.ClientHelper; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; +import org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecycleMetadata; +import org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.snapshotlifecycle.action.PutSnapshotLifecycleAction; +import org.elasticsearch.xpack.slm.SnapshotLifecycleService; + +import java.io.IOException; +import java.time.Instant; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.stream.Collectors; + +public class TransportPutSnapshotLifecycleAction extends + TransportMasterNodeAction { + + private static final Logger logger = LogManager.getLogger(TransportPutSnapshotLifecycleAction.class); + + @Inject + public TransportPutSnapshotLifecycleAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + super(PutSnapshotLifecycleAction.NAME, transportService, clusterService, threadPool, actionFilters, + PutSnapshotLifecycleAction.Request::new, indexNameExpressionResolver); + } + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected PutSnapshotLifecycleAction.Response read(StreamInput in) throws IOException { + return new PutSnapshotLifecycleAction.Response(in); + } + + @Override + protected void masterOperation(final Task task, final PutSnapshotLifecycleAction.Request request, + final ClusterState state, + final ActionListener listener) { + SnapshotLifecycleService.validateRepositoryExists(request.getLifecycle().getRepository(), state); + + // headers from the thread context stored by the AuthenticationService to be shared between the + // REST layer and the Transport layer here must be accessed within this thread and not in the + // cluster state thread in the ClusterStateUpdateTask below since that thread does not 
share the + // same context, and therefore does not have access to the appropriate security headers. + final Map filteredHeaders = threadPool.getThreadContext().getHeaders().entrySet().stream() + .filter(e -> ClientHelper.SECURITY_HEADER_FILTERS.contains(e.getKey())) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + LifecyclePolicy.validatePolicyName(request.getLifecycleId()); + clusterService.submitStateUpdateTask("put-snapshot-lifecycle-" + request.getLifecycleId(), + new AckedClusterStateUpdateTask(request, listener) { + @Override + public ClusterState execute(ClusterState currentState) { + SnapshotLifecycleMetadata snapMeta = currentState.metaData().custom(SnapshotLifecycleMetadata.TYPE); + + String id = request.getLifecycleId(); + final SnapshotLifecycleMetadata lifecycleMetadata; + if (snapMeta == null) { + SnapshotLifecyclePolicyMetadata meta = SnapshotLifecyclePolicyMetadata.builder() + .setPolicy(request.getLifecycle()) + .setHeaders(filteredHeaders) + .setModifiedDate(Instant.now().toEpochMilli()) + .build(); + IndexLifecycleMetadata ilmMeta = currentState.metaData().custom(IndexLifecycleMetadata.TYPE); + OperationMode mode = Optional.ofNullable(ilmMeta) + .map(IndexLifecycleMetadata::getOperationMode) + .orElse(OperationMode.RUNNING); + lifecycleMetadata = new SnapshotLifecycleMetadata(Collections.singletonMap(id, meta), mode); + logger.info("adding new snapshot lifecycle [{}]", id); + } else { + Map snapLifecycles = new HashMap<>(snapMeta.getSnapshotConfigurations()); + SnapshotLifecyclePolicyMetadata oldLifecycle = snapLifecycles.get(id); + SnapshotLifecyclePolicyMetadata newLifecycle = SnapshotLifecyclePolicyMetadata.builder(oldLifecycle) + .setPolicy(request.getLifecycle()) + .setHeaders(filteredHeaders) + .setVersion(oldLifecycle == null ? 1L : oldLifecycle.getVersion() + 1) + .setModifiedDate(Instant.now().toEpochMilli()) + .build(); + snapLifecycles.put(id, newLifecycle); + lifecycleMetadata = new SnapshotLifecycleMetadata(snapLifecycles, snapMeta.getOperationMode()); + if (oldLifecycle == null) { + logger.info("adding new snapshot lifecycle [{}]", id); + } else { + logger.info("updating existing snapshot lifecycle [{}]", id); + } + } + + MetaData currentMeta = currentState.metaData(); + return ClusterState.builder(currentState) + .metaData(MetaData.builder(currentMeta) + .putCustom(SnapshotLifecycleMetadata.TYPE, lifecycleMetadata)) + .build(); + } + + @Override + protected PutSnapshotLifecycleAction.Response newResponse(boolean acknowledged) { + return new PutSnapshotLifecycleAction.Response(acknowledged); + } + }); + } + + @Override + protected ClusterBlockException checkBlock(PutSnapshotLifecycleAction.Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/package-info.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/package-info.java new file mode 100644 index 0000000000000..39267c93e366e --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/package-info.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/** + * This package contains all the SLM Rest and Transport actions. + * + *
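+ * <p>For orientation, these are the REST endpoints registered by the handlers in this package (collected from the + * {@code Rest*Action} classes above): + * <pre>{@code + * PUT    /_slm/policy/{name}            create or update a policy + * GET    /_slm/policy                   get all policies + * GET    /_slm/policy/{name}            get one or more policies by name + * DELETE /_slm/policy/{name}            delete a policy + * PUT    /_slm/policy/{name}/_execute   run a policy immediately + * }</pre> + *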

The {@link org.elasticsearch.xpack.slm.action.TransportPutSnapshotLifecycleAction} creates or updates a snapshot + * lifecycle policy in the cluster state. The {@link org.elasticsearch.xpack.slm.action.TransportGetSnapshotLifecycleAction} + * simply retrieves a policy by id. The {@link org.elasticsearch.xpack.slm.action.TransportDeleteSnapshotLifecycleAction} + * removes a policy from the cluster state. These actions only interact with the cluster state. Most of the logic that take place in + * response to these actions happens on the master node in the {@link org.elasticsearch.xpack.slm.SnapshotLifecycleService}. + * + *
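+ * <p>Editor's illustration (not part of the original change): the policy object these actions persist looks like the
+ * following. The constructor shape is taken from {@code SnapshotLifecyclePolicyTests} later in this change; the
+ * concrete values are invented for the example.
+ * <pre>{@code
+ * Map<String, Object> config = new HashMap<>();
+ * config.put("indices", Collections.singletonList("important-*")); // snapshot only matching indices
+ * SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy(
+ *     "nightly-snapshots",       // policy id, checked by LifecyclePolicy.validatePolicyName
+ *     "<nightly-snap-{now/d}>",  // snapshot name, may use date math
+ *     "0 30 1 * * ?",            // cron schedule handed to the scheduler engine
+ *     "my-repository",           // must name an existing repository
+ *     config);
+ * }</pre>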

+ * <p>The {@link org.elasticsearch.xpack.slm.action.TransportExecuteSnapshotLifecycleAction} operates as if the snapshot
+ * policy given was immediately triggered by the scheduler. It does not interfere with any currently scheduled operations;
+ * it simply runs the snapshot operation ad hoc.
+ */
+package org.elasticsearch.xpack.slm.action;
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/package-info.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/package-info.java
new file mode 100644
index 0000000000000..e1f669ff333d8
--- /dev/null
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/package-info.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+/**
+ * This is the Snapshot Lifecycle Management (SLM) main package. SLM is part of the wider ILM feature and reuses a
+ * substantial amount of ILM's functionality, which is why the two features are contained in the same plugin.
+ *
+ * This package contains the {@link org.elasticsearch.xpack.slm.SnapshotLifecycleService} and
+ * {@link org.elasticsearch.xpack.slm.SnapshotLifecycleTask}, as well as the Rest and Transport actions for the
+ * feature set. It holds the primary execution logic and most of the user-facing
+ * surface area for the plugin, but not everything. The model objects for the cluster state as well as several supporting classes are
+ * contained in the {@link org.elasticsearch.xpack.core.snapshotlifecycle} package.
+ *
+ * <p>{@link org.elasticsearch.xpack.slm.SnapshotLifecycleService} maintains an internal
+ * {@link org.elasticsearch.xpack.core.scheduler.SchedulerEngine SchedulerEngine} that handles scheduling snapshots. The service
+ * executes on the currently elected master node. It listens to the cluster state, detecting new policies to schedule and unscheduling
+ * policies when they are deleted or when ILM is stopped. The bulk of this scheduling management is handled within
+ * {@link org.elasticsearch.xpack.slm.SnapshotLifecycleService#maybeScheduleSnapshot(SnapshotLifecyclePolicyMetadata)},
+ * which runs for every snapshot policy on each cluster state update.
+ *
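+ * <p>Editor's sketch of what that scheduling step amounts to. The job-id format matches
+ * {@code SnapshotLifecycleService#getJobId} as exercised by the tests in this change; the surrounding wiring is an
+ * assumption, not the actual implementation.
+ * <pre>{@code
+ * // Each policy version gets its own job id ("<policyId>-<version>"), so an
+ * // updated policy is re-registered under a fresh id and the stale job is cancelled.
+ * String jobId = policyMeta.getPolicy().getId() + "-" + policyMeta.getVersion();
+ * scheduler.add(new SchedulerEngine.Job(jobId, new CronSchedule(policyMeta.getPolicy().getSchedule())));
+ * }</pre>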

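+ * <p>The next paragraph describes how {@code SnapshotLifecycleTask} submits snapshots; as a preview, an editor's
+ * sketch of that fire-and-forget submission. Only the request and client calls reflect real APIs; {@code context},
+ * {@code successItem}, and {@code failureItem} are placeholders.
+ * <pre>{@code
+ * CreateSnapshotRequest request = new CreateSnapshotRequest(policy.getRepository(),
+ *     policy.generateSnapshotName(context)) // e.g. "nightly-snap-2019.03.15-..."
+ *     .waitForCompletion(false);            // only the submission result is observed
+ * client.admin().cluster().createSnapshot(request, ActionListener.wrap(
+ *     response -> historyStore.putAsync(successItem),  // record submission success
+ *     e -> historyStore.putAsync(failureItem)));       // record submission failure
+ * }</pre>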
+ * <p>The {@link org.elasticsearch.xpack.slm.SnapshotLifecycleTask} object receives an event when a scheduled policy
+ * is triggered for execution. It constructs a snapshot request and runs it as the user who originally set up the policy. The bulk of this
+ * logic is contained in the
+ * {@link org.elasticsearch.xpack.slm.SnapshotLifecycleTask#maybeTakeSnapshot(String, Client, ClusterService,
+ * SnapshotHistoryStore)} method. After a snapshot request has been submitted, the task persists the result (success or failure) in a
+ * history store (an index), caching the latest success and failure information in the cluster state. It is important to note that this
+ * task fires the snapshot request off and forgets it; it does not wait until the entire snapshot completes. Any success or failure that
+ * this task sees will be from the initial submission of the snapshot request only.
+ */
+package org.elasticsearch.xpack.slm;
+
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecyclePolicyMetadata;
+import org.elasticsearch.xpack.core.snapshotlifecycle.history.SnapshotHistoryStore;
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/LifecyclePolicyTestsUtils.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/LifecyclePolicyTestsUtils.java
index 3776363cf175e..b7b0c69f4b1af 100644
--- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/LifecyclePolicyTestsUtils.java
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/LifecyclePolicyTestsUtils.java
@@ -5,7 +5,7 @@
 */
package org.elasticsearch.xpack.core.indexlifecycle;

-import org.elasticsearch.xpack.indexlifecycle.LockableLifecycleType;
+import org.elasticsearch.xpack.ilm.LockableLifecycleType;

import java.util.Map;
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/ExecuteStepsUpdateTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/ExecuteStepsUpdateTaskTests.java
similarity index 98%
rename from x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/ExecuteStepsUpdateTaskTests.java
rename to x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/ExecuteStepsUpdateTaskTests.java
index 963ce5d2e2a6f..f4463aca37884 100644
--- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/ExecuteStepsUpdateTaskTests.java
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/ExecuteStepsUpdateTaskTests.java
@@ -4,7 +4,7 @@
 * you may not use this file except in compliance with the Elastic License.
*/ -package org.elasticsearch.xpack.indexlifecycle; +package org.elasticsearch.xpack.ilm; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; @@ -35,8 +35,8 @@ import org.elasticsearch.xpack.core.indexlifecycle.Step; import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; import org.elasticsearch.xpack.core.indexlifecycle.TerminalPolicyStep; -import org.elasticsearch.xpack.indexlifecycle.IndexLifecycleRunnerTests.MockClusterStateActionStep; -import org.elasticsearch.xpack.indexlifecycle.IndexLifecycleRunnerTests.MockClusterStateWaitStep; +import org.elasticsearch.xpack.ilm.IndexLifecycleRunnerTests.MockClusterStateActionStep; +import org.elasticsearch.xpack.ilm.IndexLifecycleRunnerTests.MockClusterStateWaitStep; import org.junit.Before; import org.mockito.Mockito; diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleFeatureSetUsageTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleFeatureSetUsageTests.java similarity index 97% rename from x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleFeatureSetUsageTests.java rename to x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleFeatureSetUsageTests.java index 7bd974d31c176..b67b44a1e0458 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleFeatureSetUsageTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleFeatureSetUsageTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.indexlifecycle; +package org.elasticsearch.xpack.ilm; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.test.AbstractWireSerializingTestCase; diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInfoTransportActionTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleInfoTransportActionTests.java similarity index 99% rename from x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInfoTransportActionTests.java rename to x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleInfoTransportActionTests.java index 7265c5d6ff6be..c5a19e76b0cec 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInfoTransportActionTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleInfoTransportActionTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
 */
-package org.elasticsearch.xpack.indexlifecycle;
+package org.elasticsearch.xpack.ilm;

import org.elasticsearch.Version;
import org.elasticsearch.action.support.ActionFilters;
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInitialisationTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java
similarity index 99%
rename from x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInitialisationTests.java
rename to x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java
index 6d6727174bc72..c44b656d768cb 100644
--- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInitialisationTests.java
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java
@@ -3,7 +3,7 @@
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
-package org.elasticsearch.xpack.indexlifecycle;
+package org.elasticsearch.xpack.ilm;

import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
@@ -101,6 +101,9 @@ protected Settings nodeSettings(int nodeOrdinal) {
         settings.put(XPackSettings.GRAPH_ENABLED.getKey(), false);
         settings.put(XPackSettings.LOGSTASH_ENABLED.getKey(), false);
         settings.put(LifecycleSettings.LIFECYCLE_POLL_INTERVAL, "1s");
+
+        // This is necessary to prevent SLM from installing a lifecycle policy; these tests assume a blank slate
+        settings.put(LifecycleSettings.SLM_HISTORY_INDEX_ENABLED_SETTING.getKey(), false);
         return settings.build();
     }
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleMetadataTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleMetadataTests.java
similarity index 99%
rename from x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleMetadataTests.java
rename to x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleMetadataTests.java
index 65f8399a6bd76..455193ef2b595 100644
--- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleMetadataTests.java
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleMetadataTests.java
@@ -3,7 +3,7 @@
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
*/ -package org.elasticsearch.xpack.indexlifecycle; +package org.elasticsearch.xpack.ilm; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterModule; diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunnerTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunnerTests.java similarity index 99% rename from x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunnerTests.java rename to x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunnerTests.java index 511d0e5be1ab9..6499ae61637a2 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunnerTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunnerTests.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.indexlifecycle; +package org.elasticsearch.xpack.ilm; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleServiceTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleServiceTests.java similarity index 99% rename from x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleServiceTests.java rename to x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleServiceTests.java index 3757c1cd5fb4f..dd4f69c5ad5d4 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleServiceTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleServiceTests.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.indexlifecycle; +package org.elasticsearch.xpack.ilm; import org.apache.lucene.util.SetOnce; import org.elasticsearch.Version; diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/LifecyclePolicyClientTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/LifecyclePolicyClientTests.java similarity index 99% rename from x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/LifecyclePolicyClientTests.java rename to x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/LifecyclePolicyClientTests.java index 4fec7ba80db8e..b8d802de66b4a 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/LifecyclePolicyClientTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/LifecyclePolicyClientTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.indexlifecycle; +package org.elasticsearch.xpack.ilm; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchAction; diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/LockableLifecycleType.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/LockableLifecycleType.java similarity index 97% rename from x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/LockableLifecycleType.java rename to x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/LockableLifecycleType.java index 3e09133c435a8..4853f8ae6ac2b 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/LockableLifecycleType.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/LockableLifecycleType.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.indexlifecycle; +package org.elasticsearch.xpack.ilm; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction; diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/MoveToErrorStepUpdateTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/MoveToErrorStepUpdateTaskTests.java similarity index 99% rename from x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/MoveToErrorStepUpdateTaskTests.java rename to x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/MoveToErrorStepUpdateTaskTests.java index 3ad3f27ebdf98..b81af739f8f54 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/MoveToErrorStepUpdateTaskTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/MoveToErrorStepUpdateTaskTests.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.indexlifecycle; +package org.elasticsearch.xpack.ilm; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/MoveToNextStepUpdateTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/MoveToNextStepUpdateTaskTests.java similarity index 99% rename from x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/MoveToNextStepUpdateTaskTests.java rename to x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/MoveToNextStepUpdateTaskTests.java index f166bba25c986..54112f0a743a0 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/MoveToNextStepUpdateTaskTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/MoveToNextStepUpdateTaskTests.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.indexlifecycle; +package org.elasticsearch.xpack.ilm; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchException; diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/OperationModeUpdateTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/OperationModeUpdateTaskTests.java similarity index 88% rename from x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/OperationModeUpdateTaskTests.java rename to x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/OperationModeUpdateTaskTests.java index dccd12e15f114..b9bba9cd57409 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/OperationModeUpdateTaskTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/OperationModeUpdateTaskTests.java @@ -3,16 +3,17 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.indexlifecycle; +package org.elasticsearch.xpack.ilm; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; +import org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecycleMetadata; import java.util.Collections; @@ -57,11 +58,15 @@ private void assertNoMove(OperationMode currentMode, OperationMode requestedMode private OperationMode executeUpdate(boolean metadataInstalled, OperationMode currentMode, OperationMode requestMode, boolean assertSameClusterState) { IndexLifecycleMetadata indexLifecycleMetadata = new IndexLifecycleMetadata(Collections.emptyMap(), currentMode); + SnapshotLifecycleMetadata snapshotLifecycleMetadata = new SnapshotLifecycleMetadata(Collections.emptyMap(), currentMode); ImmutableOpenMap.Builder customsMapBuilder = ImmutableOpenMap.builder(); MetaData.Builder metaData = MetaData.builder() .persistentSettings(settings(Version.CURRENT).build()); if (metadataInstalled) { - metaData.customs(customsMapBuilder.fPut(IndexLifecycleMetadata.TYPE, indexLifecycleMetadata).build()); + metaData.customs(customsMapBuilder + .fPut(IndexLifecycleMetadata.TYPE, indexLifecycleMetadata) + .fPut(SnapshotLifecycleMetadata.TYPE, snapshotLifecycleMetadata) + .build()); } ClusterState state = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).build(); OperationModeUpdateTask task = new OperationModeUpdateTask(requestMode); diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/PhaseStatsTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/PhaseStatsTests.java similarity index 97% rename from x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/PhaseStatsTests.java rename to x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/PhaseStatsTests.java index fe7fd1fca05d3..17e32e61357c7 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/PhaseStatsTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/PhaseStatsTests.java @@ -4,7 +4,7 @@ * you may not use 
this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.indexlifecycle; +package org.elasticsearch.xpack.ilm; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.common.unit.TimeValue; diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/PolicyStatsTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/PolicyStatsTests.java similarity index 97% rename from x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/PolicyStatsTests.java rename to x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/PolicyStatsTests.java index 5ced745c2fb3f..39843bb3880f0 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/PolicyStatsTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/PolicyStatsTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.indexlifecycle; +package org.elasticsearch.xpack.ilm; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.test.AbstractWireSerializingTestCase; diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/PolicyStepsRegistryTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/PolicyStepsRegistryTests.java similarity index 99% rename from x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/PolicyStepsRegistryTests.java rename to x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/PolicyStepsRegistryTests.java index 611522a59b0d4..4e53a113fd7ec 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/PolicyStepsRegistryTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/PolicyStepsRegistryTests.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.indexlifecycle; +package org.elasticsearch.xpack.ilm; import org.elasticsearch.Version; import org.elasticsearch.client.Client; diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/RandomStepInfo.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/RandomStepInfo.java similarity index 96% rename from x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/RandomStepInfo.java rename to x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/RandomStepInfo.java index 85084223481c3..e5fa6bc152f19 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/RandomStepInfo.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/RandomStepInfo.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.indexlifecycle; +package org.elasticsearch.xpack.ilm; import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.ToXContentObject; @@ -53,4 +53,4 @@ public boolean equals(Object obj) { public String toString() { return Strings.toString(this); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/SetStepInfoUpdateTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/SetStepInfoUpdateTaskTests.java similarity index 99% rename from x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/SetStepInfoUpdateTaskTests.java rename to x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/SetStepInfoUpdateTaskTests.java index a8b16d3ecfdf9..af4313d7e3baf 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/SetStepInfoUpdateTaskTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/SetStepInfoUpdateTaskTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.indexlifecycle; +package org.elasticsearch.xpack.ilm; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeValueScheduleTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/TimeValueScheduleTests.java similarity index 98% rename from x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeValueScheduleTests.java rename to x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/TimeValueScheduleTests.java index 919c4227a2b26..a0969df7cd0a3 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeValueScheduleTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/TimeValueScheduleTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.indexlifecycle; +package org.elasticsearch.xpack.ilm; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java new file mode 100644 index 0000000000000..24eedfafa488f --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java @@ -0,0 +1,200 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+
+package org.elasticsearch.xpack.slm;
+
+import org.elasticsearch.common.ValidationException;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractSerializingTestCase;
+import org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecyclePolicy;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.contains;
+import static org.hamcrest.Matchers.containsInAnyOrder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.startsWith;
+
+public class SnapshotLifecyclePolicyTests extends AbstractSerializingTestCase<SnapshotLifecyclePolicy> {
+
+    private String id;
+
+    public void testNameGeneration() {
+        long time = 1552684146542L; // Fri Mar 15 2019 21:09:06 UTC
+        SnapshotLifecyclePolicy.ResolverContext context = new SnapshotLifecyclePolicy.ResolverContext(time);
+        SnapshotLifecyclePolicy p = new SnapshotLifecyclePolicy("id", "name", "1 * * * * ?", "repo", Collections.emptyMap());
+        assertThat(p.generateSnapshotName(context), startsWith("name-"));
+        assertThat(p.generateSnapshotName(context).length(), greaterThan("name-".length()));
+
+        p = new SnapshotLifecyclePolicy("id", "<name-{now}>", "1 * * * * ?", "repo", Collections.emptyMap());
+        assertThat(p.generateSnapshotName(context), startsWith("name-2019.03.15-"));
+        assertThat(p.generateSnapshotName(context).length(), greaterThan("name-2019.03.15-".length()));
+
+        p = new SnapshotLifecyclePolicy("id", "<name-{now/M}>", "1 * * * * ?", "repo", Collections.emptyMap());
+        assertThat(p.generateSnapshotName(context), startsWith("name-2019.03.01-"));
+
+        p = new SnapshotLifecyclePolicy("id", "<name-{now/m{yyyy-MM-dd.HH:mm:ss}}>", "1 * * * * ?", "repo", Collections.emptyMap());
+        assertThat(p.generateSnapshotName(context), startsWith("name-2019-03-15.21:09:00-"));
+    }
+
+    public void testNextExecutionTime() {
+        SnapshotLifecyclePolicy p = new SnapshotLifecyclePolicy("id", "name", "0 1 2 3 4 ?
2099", "repo", Collections.emptyMap()); + assertThat(p.calculateNextExecution(), equalTo(4078864860000L)); + } + + public void testValidation() { + SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy("a,b", "", + "* * * * * L", " ", Collections.emptyMap()); + + ValidationException e = policy.validate(); + assertThat(e.validationErrors(), + containsInAnyOrder("invalid policy id [a,b]: must not contain ','", + "invalid snapshot name []: must not contain contain" + + " the following characters [ , \", *, \\, <, |, ,, >, /, ?]", + "invalid repository name [ ]: cannot be empty", + "invalid schedule: invalid cron expression [* * * * * L]")); + + policy = new SnapshotLifecyclePolicy("_my_policy", "mySnap", + " ", "repo", Collections.emptyMap()); + + e = policy.validate(); + assertThat(e.validationErrors(), + containsInAnyOrder("invalid policy id [_my_policy]: must not start with '_'", + "invalid snapshot name [mySnap]: must be lowercase", + "invalid schedule [ ]: must not be empty")); + } + + public void testMetadataValidation() { + { + Map configuration = new HashMap<>(); + final String metadataString = randomAlphaOfLength(10); + configuration.put("metadata", metadataString); + + SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy("mypolicy", "", + "1 * * * * ?", "myrepo", configuration); + ValidationException e = policy.validate(); + assertThat(e.validationErrors(), contains("invalid configuration.metadata [" + metadataString + + "]: must be an object if present")); + } + + { + Map metadata = new HashMap<>(); + metadata.put("policy", randomAlphaOfLength(5)); + Map configuration = new HashMap<>(); + configuration.put("metadata", metadata); + + SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy("mypolicy", "", + "1 * * * * ?", "myrepo", configuration); + ValidationException e = policy.validate(); + assertThat(e.validationErrors(), contains("invalid configuration.metadata: field name [policy] is reserved and " + + "will be added automatically")); + } + + { + Map metadata = new HashMap<>(); + final int fieldCount = randomIntBetween(67, 100); // 67 is the smallest field count with these sizes that causes an error + final int keyBytes = 5; // chosen arbitrarily + final int valueBytes = 4; // chosen arbitrarily + int totalBytes = fieldCount * (keyBytes + valueBytes + 6 /* bytes of overhead per key/value pair */) + 1; + for (int i = 0; i < fieldCount; i++) { + metadata.put(randomValueOtherThanMany(key -> "policy".equals(key) || metadata.containsKey(key), + () -> randomAlphaOfLength(keyBytes)), randomAlphaOfLength(valueBytes)); + } + Map configuration = new HashMap<>(); + configuration.put("metadata", metadata); + + SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy("mypolicy", "", + "1 * * * * ?", "myrepo", configuration); + ValidationException e = policy.validate(); + assertThat(e.validationErrors(), contains("invalid configuration.metadata: must be smaller than [1004] bytes, but is [" + + totalBytes + "] bytes")); + } + } + + @Override + protected SnapshotLifecyclePolicy doParseInstance(XContentParser parser) throws IOException { + return SnapshotLifecyclePolicy.parse(parser, id); + } + + @Override + protected SnapshotLifecyclePolicy createTestInstance() { + id = randomAlphaOfLength(5); + return randomSnapshotLifecyclePolicy(id); + } + + public static SnapshotLifecyclePolicy randomSnapshotLifecyclePolicy(String id) { + Map config = null; + if (randomBoolean()) { + config = new HashMap<>(); + for (int i = 0; i < randomIntBetween(2, 5); i++) { + 
config.put(randomAlphaOfLength(4), randomAlphaOfLength(4)); + } + } + return new SnapshotLifecyclePolicy(id, + randomAlphaOfLength(4), + randomSchedule(), + randomAlphaOfLength(4), + config); + } + + private static String randomSchedule() { + return randomIntBetween(0, 59) + " " + + randomIntBetween(0, 59) + " " + + randomIntBetween(0, 12) + " * * ?"; + } + + @Override + protected SnapshotLifecyclePolicy mutateInstance(SnapshotLifecyclePolicy instance) throws IOException { + switch (between(0, 4)) { + case 0: + return new SnapshotLifecyclePolicy(instance.getId() + randomAlphaOfLength(2), + instance.getName(), + instance.getSchedule(), + instance.getRepository(), + instance.getConfig()); + case 1: + return new SnapshotLifecyclePolicy(instance.getId(), + instance.getName() + randomAlphaOfLength(2), + instance.getSchedule(), + instance.getRepository(), + instance.getConfig()); + case 2: + return new SnapshotLifecyclePolicy(instance.getId(), + instance.getName(), + randomValueOtherThan(instance.getSchedule(), SnapshotLifecyclePolicyTests::randomSchedule), + instance.getRepository(), + instance.getConfig()); + case 3: + return new SnapshotLifecyclePolicy(instance.getId(), + instance.getName(), + instance.getSchedule(), + instance.getRepository() + randomAlphaOfLength(2), + instance.getConfig()); + case 4: + Map newConfig = new HashMap<>(); + for (int i = 0; i < randomIntBetween(2, 5); i++) { + newConfig.put(randomAlphaOfLength(3), randomAlphaOfLength(3)); + } + return new SnapshotLifecyclePolicy(instance.getId(), + instance.getName() + randomAlphaOfLength(2), + instance.getSchedule(), + instance.getRepository(), + newConfig); + default: + throw new AssertionError("failure, got illegal switch case"); + } + } + + @Override + protected Writeable.Reader instanceReader() { + return SnapshotLifecyclePolicy::new; + } +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleServiceTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleServiceTests.java new file mode 100644 index 0000000000000..272203bb1453b --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleServiceTests.java @@ -0,0 +1,339 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.slm; + +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.metadata.RepositoriesMetaData; +import org.elasticsearch.cluster.metadata.RepositoryMetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ClusterServiceUtils; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; +import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; +import org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecycleMetadata; +import org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecyclePolicy; +import org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.watcher.watch.ClockMock; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; + +public class SnapshotLifecycleServiceTests extends ESTestCase { + + public void testGetJobId() { + String id = randomAlphaOfLengthBetween(1, 10) + (randomBoolean() ? 
"" : randomLong()); + SnapshotLifecyclePolicy policy = createPolicy(id); + long version = randomNonNegativeLong(); + SnapshotLifecyclePolicyMetadata meta = SnapshotLifecyclePolicyMetadata.builder() + .setPolicy(policy) + .setHeaders(Collections.emptyMap()) + .setVersion(version) + .setModifiedDate(1) + .build(); + assertThat(SnapshotLifecycleService.getJobId(meta), equalTo(id + "-" + version)); + } + + public void testRepositoryExistenceForExistingRepo() { + ClusterState state = ClusterState.builder(new ClusterName("cluster")).build(); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> SnapshotLifecycleService.validateRepositoryExists("repo", state)); + + assertThat(e.getMessage(), containsString("no such repository [repo]")); + + RepositoryMetaData repo = new RepositoryMetaData("repo", "fs", Settings.EMPTY); + RepositoriesMetaData repoMeta = new RepositoriesMetaData(Collections.singletonList(repo)); + ClusterState stateWithRepo = ClusterState.builder(state) + .metaData(MetaData.builder() + .putCustom(RepositoriesMetaData.TYPE, repoMeta)) + .build(); + + SnapshotLifecycleService.validateRepositoryExists("repo", stateWithRepo); + } + + public void testRepositoryExistenceForMissingRepo() { + ClusterState state = ClusterState.builder(new ClusterName("cluster")).build(); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> SnapshotLifecycleService.validateRepositoryExists("repo", state)); + + assertThat(e.getMessage(), containsString("no such repository [repo]")); + } + + public void testNothingScheduledWhenNotRunning() { + ClockMock clock = new ClockMock(); + SnapshotLifecyclePolicyMetadata initialPolicy = SnapshotLifecyclePolicyMetadata.builder() + .setPolicy(createPolicy("initial", "*/1 * * * * ?")) + .setHeaders(Collections.emptyMap()) + .setVersion(1) + .setModifiedDate(1) + .build(); + ClusterState initialState = createState(new SnapshotLifecycleMetadata( + Collections.singletonMap(initialPolicy.getPolicy().getId(), initialPolicy), OperationMode.RUNNING)); + try (ThreadPool threadPool = new TestThreadPool("test"); + ClusterService clusterService = ClusterServiceUtils.createClusterService(initialState, threadPool); + SnapshotLifecycleService sls = new SnapshotLifecycleService(Settings.EMPTY, + () -> new FakeSnapshotTask(e -> logger.info("triggered")), clusterService, clock)) { + + sls.offMaster(); + + SnapshotLifecyclePolicyMetadata newPolicy = SnapshotLifecyclePolicyMetadata.builder() + .setPolicy(createPolicy("foo", "*/1 * * * * ?")) + .setHeaders(Collections.emptyMap()) + .setVersion(2) + .setModifiedDate(2) + .build(); + Map policies = new HashMap<>(); + policies.put(newPolicy.getPolicy().getId(), newPolicy); + ClusterState emptyState = createState(new SnapshotLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING)); + ClusterState state = createState(new SnapshotLifecycleMetadata(policies, OperationMode.RUNNING)); + + sls.clusterChanged(new ClusterChangedEvent("1", state, emptyState)); + + // Since the service does not think it is master, it should not be triggered or scheduled + assertThat(sls.getScheduler().scheduledJobIds(), equalTo(Collections.emptySet())); + + sls.onMaster(); + assertThat(sls.getScheduler().scheduledJobIds(), equalTo(Collections.singleton("initial-1"))); + + state = createState(new SnapshotLifecycleMetadata(policies, OperationMode.STOPPING)); + sls.clusterChanged(new ClusterChangedEvent("2", state, emptyState)); + + // Since the service is stopping, jobs should have been cancelled + 
+            assertThat(sls.getScheduler().scheduledJobIds(), equalTo(Collections.emptySet()));
+
+            state = createState(new SnapshotLifecycleMetadata(policies, OperationMode.STOPPED));
+            sls.clusterChanged(new ClusterChangedEvent("3", state, emptyState));
+
+            // Since the service is stopped, jobs should have been cancelled
+            assertThat(sls.getScheduler().scheduledJobIds(), equalTo(Collections.emptySet()));
+
+            threadPool.shutdownNow();
+        }
+    }
+
+    /**
+     * Test new policies getting scheduled correctly, updated policies also being scheduled,
+     * and deleted policies having their schedules cancelled.
+     */
+    public void testPolicyCRUD() throws Exception {
+        ClockMock clock = new ClockMock();
+        final AtomicInteger triggerCount = new AtomicInteger(0);
+        final AtomicReference<Consumer<SchedulerEngine.Event>> trigger = new AtomicReference<>(e -> triggerCount.incrementAndGet());
+        try (ThreadPool threadPool = new TestThreadPool("test");
+             ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool);
+             SnapshotLifecycleService sls = new SnapshotLifecycleService(Settings.EMPTY,
+                 () -> new FakeSnapshotTask(e -> trigger.get().accept(e)), clusterService, clock)) {
+
+            sls.offMaster();
+            SnapshotLifecycleMetadata snapMeta = new SnapshotLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING);
+            ClusterState previousState = createState(snapMeta);
+            Map<String, SnapshotLifecyclePolicyMetadata> policies = new HashMap<>();
+
+            SnapshotLifecyclePolicyMetadata policy = SnapshotLifecyclePolicyMetadata.builder()
+                .setPolicy(createPolicy("foo", "*/1 * * * * ?"))
+                .setHeaders(Collections.emptyMap())
+                .setModifiedDate(1)
+                .build();
+            policies.put(policy.getPolicy().getId(), policy);
+            snapMeta = new SnapshotLifecycleMetadata(policies, OperationMode.RUNNING);
+            ClusterState state = createState(snapMeta);
+            ClusterChangedEvent event = new ClusterChangedEvent("1", state, previousState);
+            trigger.set(e -> {
+                fail("trigger should not be invoked");
+            });
+            sls.clusterChanged(event);
+
+            // Since the service does not think it is master, it should not be triggered or scheduled
+            assertThat(sls.getScheduler().scheduledJobIds(), equalTo(Collections.emptySet()));
+
+            // Change the service to think it's on the master node, events should be scheduled now
+            sls.onMaster();
+            trigger.set(e -> triggerCount.incrementAndGet());
+            sls.clusterChanged(event);
+            assertThat(sls.getScheduler().scheduledJobIds(), equalTo(Collections.singleton("foo-1")));
+
+            assertBusy(() -> assertThat(triggerCount.get(), greaterThan(0)));
+
+            clock.freeze();
+            int currentCount = triggerCount.get();
+            previousState = state;
+            SnapshotLifecyclePolicyMetadata newPolicy = SnapshotLifecyclePolicyMetadata.builder()
+                .setPolicy(createPolicy("foo", "*/1 * * * * ?"))
+                .setHeaders(Collections.emptyMap())
+                .setVersion(2)
+                .setModifiedDate(2)
+                .build();
+            policies.put(policy.getPolicy().getId(), newPolicy);
+            state = createState(new SnapshotLifecycleMetadata(policies, OperationMode.RUNNING));
+            event = new ClusterChangedEvent("2", state, previousState);
+            sls.clusterChanged(event);
+            assertThat(sls.getScheduler().scheduledJobIds(), equalTo(Collections.singleton("foo-2")));
+
+            trigger.set(e -> {
+                // Make sure the job got updated
+                assertThat(e.getJobName(), equalTo("foo-2"));
+                triggerCount.incrementAndGet();
+            });
+            clock.fastForwardSeconds(1);
+
+            assertBusy(() -> assertThat(triggerCount.get(), greaterThan(currentCount)));
+
+            final int currentCount2 = triggerCount.get();
+            previousState = state;
+            // Create a state simulating the policy being deleted
+            state = createState(new SnapshotLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING));
+            event = new ClusterChangedEvent("2", state, previousState);
+            sls.clusterChanged(event);
+            clock.fastForwardSeconds(2);
+
+            // The existing job should be cancelled and no longer trigger
+            assertThat(triggerCount.get(), equalTo(currentCount2));
+            assertThat(sls.getScheduler().scheduledJobIds(), equalTo(Collections.emptySet()));
+
+            // When the service is no longer master, all jobs should be automatically cancelled
+            policy = SnapshotLifecyclePolicyMetadata.builder()
+                .setPolicy(createPolicy("foo", "*/1 * * * * ?"))
+                .setHeaders(Collections.emptyMap())
+                .setVersion(3)
+                .setModifiedDate(1)
+                .build();
+            policies.put(policy.getPolicy().getId(), policy);
+            snapMeta = new SnapshotLifecycleMetadata(policies, OperationMode.RUNNING);
+            previousState = state;
+            state = createState(snapMeta);
+            event = new ClusterChangedEvent("1", state, previousState);
+            trigger.set(e -> triggerCount.incrementAndGet());
+            sls.clusterChanged(event);
+            clock.fastForwardSeconds(2);
+
+            // Make sure at least one triggers and the job is scheduled
+            assertBusy(() -> assertThat(triggerCount.get(), greaterThan(currentCount2)));
+            assertThat(sls.getScheduler().scheduledJobIds(), equalTo(Collections.singleton("foo-3")));
+
+            // Signify becoming non-master; the jobs should all be cancelled
+            sls.offMaster();
+            assertThat(sls.getScheduler().scheduledJobIds(), equalTo(Collections.emptySet()));
+
+            threadPool.shutdownNow();
+        }
+    }
+
+    /**
+     * Test that policy ids ending in numbers do not clash with the generated job ids,
+     * which append the policy version to the policy id.
+     */
+    public void testPolicyNamesEndingInNumbers() throws Exception {
+        ClockMock clock = new ClockMock();
+        final AtomicInteger triggerCount = new AtomicInteger(0);
+        final AtomicReference<Consumer<SchedulerEngine.Event>> trigger = new AtomicReference<>(e -> triggerCount.incrementAndGet());
+        try (ThreadPool threadPool = new TestThreadPool("test");
+             ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool);
+             SnapshotLifecycleService sls = new SnapshotLifecycleService(Settings.EMPTY,
+                 () -> new FakeSnapshotTask(e -> trigger.get().accept(e)), clusterService, clock)) {
+            sls.onMaster();
+
+            SnapshotLifecycleMetadata snapMeta = new SnapshotLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING);
+            ClusterState previousState = createState(snapMeta);
+            Map<String, SnapshotLifecyclePolicyMetadata> policies = new HashMap<>();
+
+            SnapshotLifecyclePolicyMetadata policy = SnapshotLifecyclePolicyMetadata.builder()
+                .setPolicy(createPolicy("foo-2", "30 * * * * ?"))
+                .setHeaders(Collections.emptyMap())
+                .setVersion(1)
+                .setModifiedDate(1)
+                .build();
+            policies.put(policy.getPolicy().getId(), policy);
+            snapMeta = new SnapshotLifecycleMetadata(policies, OperationMode.RUNNING);
+            ClusterState state = createState(snapMeta);
+            ClusterChangedEvent event = new ClusterChangedEvent("1", state, previousState);
+            sls.clusterChanged(event);
+
+            assertThat(sls.getScheduler().scheduledJobIds(), equalTo(Collections.singleton("foo-2-1")));
+
+            previousState = state;
+            SnapshotLifecyclePolicyMetadata secondPolicy = SnapshotLifecyclePolicyMetadata.builder()
+                .setPolicy(createPolicy("foo-1", "45 * * * * ?"))
+                .setHeaders(Collections.emptyMap())
+                .setVersion(2)
+                .setModifiedDate(1)
+                .build();
+            policies.put(secondPolicy.getPolicy().getId(), secondPolicy);
+            snapMeta = new SnapshotLifecycleMetadata(policies, OperationMode.RUNNING);
+            state = createState(snapMeta);
+            event = new ClusterChangedEvent("2", state, previousState);
+            sls.clusterChanged(event);
+
+            assertThat(sls.getScheduler().scheduledJobIds(), containsInAnyOrder("foo-2-1", "foo-1-2"));
+
+            sls.offMaster();
+            assertThat(sls.getScheduler().scheduledJobIds(), equalTo(Collections.emptySet()));
+
+            threadPool.shutdownNow();
+        }
+    }
+
+    class FakeSnapshotTask extends SnapshotLifecycleTask {
+        private final Consumer<SchedulerEngine.Event> onTriggered;
+
+        FakeSnapshotTask(Consumer<SchedulerEngine.Event> onTriggered) {
+            super(null, null, null);
+            this.onTriggered = onTriggered;
+        }
+
+        @Override
+        public void triggered(SchedulerEngine.Event event) {
+            logger.info("--> fake snapshot task triggered");
+            onTriggered.accept(event);
+        }
+    }
+
+    public ClusterState createState(SnapshotLifecycleMetadata snapMeta) {
+        MetaData metaData = MetaData.builder()
+            .putCustom(SnapshotLifecycleMetadata.TYPE, snapMeta)
+            .build();
+        return ClusterState.builder(new ClusterName("cluster"))
+            .metaData(metaData)
+            .build();
+    }
+
+    public static SnapshotLifecyclePolicy createPolicy(String id) {
+        return createPolicy(id, randomSchedule());
+    }
+
+    public static SnapshotLifecyclePolicy createPolicy(String id, String schedule) {
+        Map<String, Object> config = new HashMap<>();
+        config.put("ignore_unavailable", randomBoolean());
+        List<String> indices = new ArrayList<>();
+        indices.add("foo-*");
+        indices.add(randomAlphaOfLength(4));
+        config.put("indices", indices);
+        return new SnapshotLifecyclePolicy(id, randomAlphaOfLength(4), schedule, randomAlphaOfLength(4), config);
+    }
+
+    private static String randomSchedule() {
+        return randomIntBetween(0, 59) + " " +
+            randomIntBetween(0, 59) + " " +
+            randomIntBetween(0, 12) + " * * ?";
+    }
+}
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java
new file mode 100644
index 0000000000000..5dbf940f5adb9
--- /dev/null
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java
@@ -0,0 +1,244 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ + +package org.elasticsearch.xpack.slm; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotAction; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.TriFunction; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.ClusterServiceUtils; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; +import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; +import org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecycleMetadata; +import org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecyclePolicy; +import org.elasticsearch.xpack.core.snapshotlifecycle.SnapshotLifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.snapshotlifecycle.history.SnapshotHistoryItem; +import org.elasticsearch.xpack.core.snapshotlifecycle.history.SnapshotHistoryStore; + +import java.io.IOException; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Consumer; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.startsWith; + +public class SnapshotLifecycleTaskTests extends ESTestCase { + + public void testGetSnapMetadata() { + final String id = randomAlphaOfLength(4); + final SnapshotLifecyclePolicyMetadata slpm = makePolicyMeta(id); + final SnapshotLifecycleMetadata meta = new SnapshotLifecycleMetadata(Collections.singletonMap(id, slpm), OperationMode.RUNNING); + + final ClusterState state = ClusterState.builder(new ClusterName("test")) + .metaData(MetaData.builder() + .putCustom(SnapshotLifecycleMetadata.TYPE, meta) + .build()) + .build(); + + final Optional o = + SnapshotLifecycleTask.getSnapPolicyMetadata(SnapshotLifecycleService.getJobId(slpm), state); + + assertTrue("the policy metadata should be retrieved from the cluster state", o.isPresent()); + assertThat(o.get(), equalTo(slpm)); + + assertFalse(SnapshotLifecycleTask.getSnapPolicyMetadata("bad-jobid", state).isPresent()); + } + + public void testSkipCreatingSnapshotWhenJobDoesNotMatch() { + final String id = randomAlphaOfLength(4); + final SnapshotLifecyclePolicyMetadata slpm = makePolicyMeta(id); + final SnapshotLifecycleMetadata meta = new SnapshotLifecycleMetadata(Collections.singletonMap(id, slpm), OperationMode.RUNNING); + + final ClusterState state = ClusterState.builder(new ClusterName("test")) + .metaData(MetaData.builder() + 
.putCustom(SnapshotLifecycleMetadata.TYPE, meta) + .build()) + .build(); + + final ThreadPool threadPool = new TestThreadPool("test"); + try (ClusterService clusterService = ClusterServiceUtils.createClusterService(state, threadPool); + VerifyingClient client = new VerifyingClient(threadPool, (a, r, l) -> { + fail("should not have tried to take a snapshot"); + return null; + })) { + SnapshotHistoryStore historyStore = new VerifyingHistoryStore(null, ZoneOffset.UTC, + item -> fail("should not have tried to store an item")); + + SnapshotLifecycleTask task = new SnapshotLifecycleTask(client, clusterService, historyStore); + + // Trigger the event, but since the job name does not match, it should + // not run the function to create a snapshot + task.triggered(new SchedulerEngine.Event("nonexistent-job", System.currentTimeMillis(), System.currentTimeMillis())); + } + + threadPool.shutdownNow(); + } + + public void testCreateSnapshotOnTrigger() { + final String id = randomAlphaOfLength(4); + final SnapshotLifecyclePolicyMetadata slpm = makePolicyMeta(id); + final SnapshotLifecycleMetadata meta = new SnapshotLifecycleMetadata(Collections.singletonMap(id, slpm), OperationMode.RUNNING); + + final ClusterState state = ClusterState.builder(new ClusterName("test")) + .metaData(MetaData.builder() + .putCustom(SnapshotLifecycleMetadata.TYPE, meta) + .build()) + .build(); + + final ThreadPool threadPool = new TestThreadPool("test"); + final String createSnapResponse = "{" + + " \"snapshot\" : {" + + " \"snapshot\" : \"snapshot_1\"," + + " \"uuid\" : \"bcP3ClgCSYO_TP7_FCBbBw\"," + + " \"version_id\" : " + Version.CURRENT.id + "," + + " \"version\" : \"" + Version.CURRENT + "\"," + + " \"indices\" : [ ]," + + " \"include_global_state\" : true," + + " \"state\" : \"SUCCESS\"," + + " \"start_time\" : \"2019-03-19T22:19:53.542Z\"," + + " \"start_time_in_millis\" : 1553033993542," + + " \"end_time\" : \"2019-03-19T22:19:53.567Z\"," + + " \"end_time_in_millis\" : 1553033993567," + + " \"duration_in_millis\" : 25," + + " \"failures\" : [ ]," + + " \"shards\" : {" + + " \"total\" : 0," + + " \"failed\" : 0," + + " \"successful\" : 0" + + " }" + + " }" + + "}"; + + final AtomicBoolean clientCalled = new AtomicBoolean(false); + final SetOnce snapshotName = new SetOnce<>(); + try (ClusterService clusterService = ClusterServiceUtils.createClusterService(state, threadPool); + // This verifying client will verify that we correctly invoked + // client.admin().createSnapshot(...) with the appropriate + // request. 
It also returns a mock real response
+            VerifyingClient client = new VerifyingClient(threadPool,
+                (action, request, listener) -> {
+                    assertFalse(clientCalled.getAndSet(true));
+                    assertThat(action, instanceOf(CreateSnapshotAction.class));
+                    assertThat(request, instanceOf(CreateSnapshotRequest.class));
+
+                    CreateSnapshotRequest req = (CreateSnapshotRequest) request;
+
+                    SnapshotLifecyclePolicy policy = slpm.getPolicy();
+                    assertThat(req.snapshot(), startsWith(policy.getName() + "-"));
+                    assertThat(req.repository(), equalTo(policy.getRepository()));
+                    snapshotName.set(req.snapshot());
+                    if (req.indices().length > 0) {
+                        assertThat(Arrays.asList(req.indices()), equalTo(policy.getConfig().get("indices")));
+                    }
+                    boolean globalState = policy.getConfig().get("include_global_state") == null ||
+                        Boolean.parseBoolean((String) policy.getConfig().get("include_global_state"));
+                    assertThat(req.includeGlobalState(), equalTo(globalState));
+
+                    try {
+                        return CreateSnapshotResponse.fromXContent(createParser(JsonXContent.jsonXContent, createSnapResponse));
+                    } catch (IOException e) {
+                        fail("failed to parse snapshot response");
+                        return null;
+                    }
+                })) {
+            final AtomicBoolean historyStoreCalled = new AtomicBoolean(false);
+            SnapshotHistoryStore historyStore = new VerifyingHistoryStore(null, ZoneOffset.UTC,
+                item -> {
+                    assertFalse(historyStoreCalled.getAndSet(true));
+                    final SnapshotLifecyclePolicy policy = slpm.getPolicy();
+                    assertEquals(policy.getId(), item.getPolicyId());
+                    assertEquals(policy.getRepository(), item.getRepository());
+                    assertEquals(policy.getConfig(), item.getSnapshotConfiguration());
+                    assertEquals(snapshotName.get(), item.getSnapshotName());
+                });
+
+            SnapshotLifecycleTask task = new SnapshotLifecycleTask(client, clusterService, historyStore);
+            // Trigger the event with a matching job name for the policy
+            task.triggered(new SchedulerEngine.Event(SnapshotLifecycleService.getJobId(slpm),
+                System.currentTimeMillis(), System.currentTimeMillis()));
+
+            assertTrue("snapshot should be triggered once", clientCalled.get());
+            assertTrue("history store should be called once", historyStoreCalled.get());
+        }
+
+        threadPool.shutdownNow();
+    }
+
+    /**
+     * A client that delegates to a verifying function for action/request/listener
+     */
+    public static class VerifyingClient extends NoOpClient {
+
+        private final TriFunction<ActionType<?>, ActionRequest, ActionListener<?>, ActionResponse> verifier;
+
+        VerifyingClient(ThreadPool threadPool,
+                        TriFunction<ActionType<?>, ActionRequest, ActionListener<?>, ActionResponse> verifier) {
+            super(threadPool);
+            this.verifier = verifier;
+        }
+
+        @Override
+        @SuppressWarnings("unchecked")
+        protected <Request extends ActionRequest, Response extends ActionResponse> void doExecute(ActionType<Response> action,
+                                                                                                  Request request,
+                                                                                                  ActionListener<Response> listener) {
+            listener.onResponse((Response) verifier.apply(action, request, listener));
+        }
+    }
+
+    private SnapshotLifecyclePolicyMetadata makePolicyMeta(final String id) {
+        SnapshotLifecyclePolicy policy = SnapshotLifecycleServiceTests.createPolicy(id);
+        Map<String, String> headers = new HashMap<>();
+        headers.put("X-Opaque-ID", randomAlphaOfLength(4));
+        return SnapshotLifecyclePolicyMetadata.builder()
+            .setPolicy(policy)
+            .setHeaders(headers)
+            .setVersion(1)
+            .setModifiedDate(1)
+            .build();
+    }
+
+    public static class VerifyingHistoryStore extends SnapshotHistoryStore {
+
+        Consumer<SnapshotHistoryItem> verifier;
+
+        public VerifyingHistoryStore(Client client, ZoneId timeZone, Consumer<SnapshotHistoryItem> verifier) {
+            super(Settings.EMPTY, client, timeZone);
+            this.verifier = verifier;
+        }
+
+        @Override
+        public void putAsync(SnapshotHistoryItem item) {
verifier.accept(item); + } + } +} diff --git a/x-pack/plugin/ml/build.gradle b/x-pack/plugin/ml/build.gradle index ec35e7fb8849f..c0f3d94b776e5 100644 --- a/x-pack/plugin/ml/build.gradle +++ b/x-pack/plugin/ml/build.gradle @@ -46,9 +46,6 @@ bundlePlugin { exclude 'platform/licenses/**' } -compileJava.options.compilerArgs << "-Xlint:-deprecation,-rawtypes,-serial,-try,-unchecked" -compileTestJava.options.compilerArgs << "-Xlint:-deprecation,-rawtypes,-serial,-try,-unchecked" - dependencies { compileOnly project(':modules:lang-painless:spi') compileOnly project(path: xpackModule('core'), configuration: 'default') diff --git a/x-pack/plugin/ml/qa/basic-multi-node/build.gradle b/x-pack/plugin/ml/qa/basic-multi-node/build.gradle index 9bbf34e544152..2d90950791cb1 100644 --- a/x-pack/plugin/ml/qa/basic-multi-node/build.gradle +++ b/x-pack/plugin/ml/qa/basic-multi-node/build.gradle @@ -8,7 +8,7 @@ dependencies { } testClusters.integTest { - distribution = 'DEFAULT' + testDistribution = 'DEFAULT' numberOfNodes = 3 setting 'xpack.ilm.enabled', 'false' setting 'xpack.security.enabled', 'false' diff --git a/x-pack/plugin/ml/qa/basic-multi-node/src/test/java/org/elasticsearch/xpack/ml/integration/MlBasicMultiNodeIT.java b/x-pack/plugin/ml/qa/basic-multi-node/src/test/java/org/elasticsearch/xpack/ml/integration/MlBasicMultiNodeIT.java index 31b43eb055523..51e77bf97aef8 100644 --- a/x-pack/plugin/ml/qa/basic-multi-node/src/test/java/org/elasticsearch/xpack/ml/integration/MlBasicMultiNodeIT.java +++ b/x-pack/plugin/ml/qa/basic-multi-node/src/test/java/org/elasticsearch/xpack/ml/integration/MlBasicMultiNodeIT.java @@ -14,9 +14,9 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xpack.ml.MachineLearning; +import org.yaml.snakeyaml.util.UriEncoder; import java.io.IOException; -import java.net.URLEncoder; import java.util.Collections; import java.util.List; import java.util.Map; @@ -303,7 +303,7 @@ private Response createFarequoteJob(String jobId) throws Exception { } xContentBuilder.endObject(); - Request request = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + URLEncoder.encode(jobId, "UTF-8")); + Request request = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + UriEncoder.encode(jobId)); request.setJsonEntity(Strings.toString(xContentBuilder)); return client().performRequest(request); } diff --git a/x-pack/plugin/ml/qa/disabled/build.gradle b/x-pack/plugin/ml/qa/disabled/build.gradle index 3e7f8b5ca7f50..f066e7e5bcfaf 100644 --- a/x-pack/plugin/ml/qa/disabled/build.gradle +++ b/x-pack/plugin/ml/qa/disabled/build.gradle @@ -8,7 +8,7 @@ dependencies { } testClusters.integTest { - distribution = 'DEFAULT' + testDistribution = 'DEFAULT' setting 'xpack.ilm.enabled', 'false' setting 'xpack.security.enabled', 'false' setting 'xpack.ml.enabled', 'false' diff --git a/x-pack/plugin/ml/qa/ml-with-security/build.gradle b/x-pack/plugin/ml/qa/ml-with-security/build.gradle index 6077b8ab099f6..a39eb0d5b24e4 100644 --- a/x-pack/plugin/ml/qa/ml-with-security/build.gradle +++ b/x-pack/plugin/ml/qa/ml-with-security/build.gradle @@ -75,9 +75,9 @@ integTest.runner { 'ml/evaluate_data_frame/Test given missing index', 'ml/evaluate_data_frame/Test given index does not exist', 'ml/evaluate_data_frame/Test given missing evaluation', - 'ml/evaluate_data_frame/Test binary_soft_classifition auc_roc given actual_field is always true', - 'ml/evaluate_data_frame/Test binary_soft_classifition auc_roc given 
actual_field is always false', - 'ml/evaluate_data_frame/Test binary_soft_classification given evaluation with emtpy metrics', + 'ml/evaluate_data_frame/Test binary_soft_classification auc_roc given actual_field is always true', + 'ml/evaluate_data_frame/Test binary_soft_classification auc_roc given actual_field is always false', + 'ml/evaluate_data_frame/Test binary_soft_classification given evaluation with empty metrics', 'ml/evaluate_data_frame/Test binary_soft_classification given missing actual_field', 'ml/evaluate_data_frame/Test binary_soft_classification given missing predicted_probability_field', 'ml/evaluate_data_frame/Test binary_soft_classification given precision with threshold less than zero', @@ -86,6 +86,7 @@ integTest.runner { 'ml/evaluate_data_frame/Test binary_soft_classification given precision with empty thresholds', 'ml/evaluate_data_frame/Test binary_soft_classification given recall with empty thresholds', 'ml/evaluate_data_frame/Test binary_soft_classification given confusion_matrix with empty thresholds', + 'ml/evaluate_data_frame/Test regression given evaluation with empty metrics', 'ml/delete_job_force/Test cannot force delete a non-existent job', 'ml/delete_model_snapshot/Test delete snapshot missing snapshotId', 'ml/delete_model_snapshot/Test delete snapshot missing job_id', @@ -165,7 +166,7 @@ integTest.runner { testClusters.integTest { - distribution = 'DEFAULT' + testDistribution = 'DEFAULT' extraConfigFile 'roles.yml', file('roles.yml') user username: "x_pack_rest_user", password: "x-pack-test-password" user username: "ml_admin", password: "x-pack-test-password", role: "minimal,machine_learning_admin" diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle b/x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle index 82b8e5cd7e101..4c6c8b49fa634 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle @@ -40,11 +40,12 @@ integTest { testClusters.integTest { numberOfNodes = 3 - distribution = 'DEFAULT' + testDistribution = 'DEFAULT' setting 'xpack.security.enabled', 'true' setting 'xpack.ml.enabled', 'true' setting 'xpack.watcher.enabled', 'false' + setting 'xpack.ilm.enabled', 'false' setting 'xpack.monitoring.enabled', 'false' setting 'xpack.security.authc.token.enabled', 'true' setting 'xpack.security.transport.ssl.enabled', 'true' diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java index a01d23fca62e0..781ea094302a4 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java @@ -27,6 +27,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.anyOf; /** * A set of tests that ensure we comply to the model memory limit @@ -38,7 +39,6 @@ public void cleanUpTest() { cleanUp(); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/43013") public void testTooManyPartitions() throws Exception { assumeFalse("AwaitsFix(bugUrl = \"https://github.com/elastic/elasticsearch/issues/32033\")", Constants.WINDOWS); 
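        // Editor's note: assumeFalse skips the test (rather than failing it) when the
        // condition is true, so this test is a no-op on Windows while the linked issue
        // is open. A minimal sketch of the pattern, assuming the assume helpers
        // inherited from the test base class:
        //     assumeFalse("skipped on Windows", Constants.WINDOWS);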
Detector.Builder detector = new Detector.Builder("count", null); @@ -80,9 +80,10 @@ public void testTooManyPartitions() throws Exception { // Assert we haven't violated the limit too much GetJobsStatsAction.Response.JobStats jobStats = getJobStats(job.getId()).get(0); ModelSizeStats modelSizeStats = jobStats.getModelSizeStats(); - assertThat(modelSizeStats.getModelBytes(), lessThan(31500000L)); + assertThat(modelSizeStats.getModelBytes(), lessThan(32000000L)); assertThat(modelSizeStats.getModelBytes(), greaterThan(24000000L)); - assertThat(modelSizeStats.getMemoryStatus(), equalTo(ModelSizeStats.MemoryStatus.HARD_LIMIT)); + assertThat(modelSizeStats.getMemoryStatus(), anyOf(equalTo(ModelSizeStats.MemoryStatus.SOFT_LIMIT), + equalTo(ModelSizeStats.MemoryStatus.HARD_LIMIT))); } public void testTooManyByFields() throws Exception { diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/BasicRenormalizationIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/BasicRenormalizationIT.java index d1decd4387f8f..967603f66a117 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/BasicRenormalizationIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/BasicRenormalizationIT.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.ml.integration; +import org.apache.lucene.util.Constants; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.xpack.core.ml.action.GetRecordsAction; import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; @@ -32,6 +33,7 @@ public void tearDownData() { } public void testDefaultRenormalization() throws Exception { + assumeFalse("https://github.com/elastic/elasticsearch/issues/44613", Constants.WINDOWS); String jobId = "basic-renormalization-it-test-default-renormalization-job"; createAndRunJob(jobId, null); diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java index 9b6523eb73cc2..8953c65d15b0c 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java @@ -10,6 +10,8 @@ import org.elasticsearch.action.admin.cluster.node.hotthreads.NodeHotThreads; import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.common.CheckedRunnable; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.ConcurrentMapLong; @@ -106,15 +108,15 @@ public void testDatafeedTimingStats_DatafeedRecreated() throws Exception { Instant now = Instant.now(); indexDocs(logger, "data", numDocs, now.minus(Duration.ofDays(14)).toEpochMilli(), now.toEpochMilli()); - Job.Builder job = createScheduledJob("lookback-job"); + Job.Builder job = createScheduledJob("lookback-job-datafeed-recreated"); - String datafeedId = "lookback-datafeed"; + String datafeedId = "lookback-datafeed-datafeed-recreated"; 
DatafeedConfig datafeedConfig = createDatafeed(datafeedId, job.getId(), Arrays.asList("data")); registerJob(job); putJob(job); - for (int i = 0; i < 2; ++i) { + CheckedRunnable openAndRunJob = () -> { openJob(job.getId()); assertBusy(() -> assertEquals(getJobStats(job.getId()).get(0).getState(), JobState.OPENED)); registerDatafeed(datafeedConfig); @@ -129,7 +131,10 @@ public void testDatafeedTimingStats_DatafeedRecreated() throws Exception { }, 60, TimeUnit.SECONDS); deleteDatafeed(datafeedId); waitUntilJobIsClosed(job.getId()); - } + }; + + openAndRunJob.run(); + openAndRunJob.run(); } public void testDatafeedTimingStats_DatafeedJobIdUpdated() throws Exception { @@ -140,8 +145,8 @@ public void testDatafeedTimingStats_DatafeedJobIdUpdated() throws Exception { Instant now = Instant.now(); indexDocs(logger, "data", numDocs, now.minus(Duration.ofDays(14)).toEpochMilli(), now.toEpochMilli()); - Job.Builder jobA = createScheduledJob("lookback-job"); - Job.Builder jobB = createScheduledJob("other-lookback-job"); + Job.Builder jobA = createScheduledJob("lookback-job-jobid-updated"); + Job.Builder jobB = createScheduledJob("other-lookback-job-jobid-updated"); for (Job.Builder job : Arrays.asList(jobA, jobB)) { registerJob(job); putJob(job); @@ -152,11 +157,10 @@ public void testDatafeedTimingStats_DatafeedJobIdUpdated() throws Exception { registerDatafeed(datafeedConfig); putDatafeed(datafeedConfig); - for (Job.Builder job : Arrays.asList(jobA, jobB, jobA)) { + CheckedConsumer openAndRunJob = job -> { openJob(job.getId()); assertBusy(() -> assertEquals(getJobStats(job.getId()).get(0).getState(), JobState.OPENED)); // Bind datafeedId to the current job on the list, timing stats are wiped out. - updateDatafeed(new DatafeedUpdate.Builder(datafeedId).setJobId(job.getId()).build()); // Datafeed did not do anything yet, hence search_count is equal to 0. 
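        // Editor's note: the for-loop this hunk removes is unrolled below into explicit
        // openAndRunJob calls, so the datafeed update between runs is visible at each
        // call site; the lambda's type parameters are stripped in this rendering and
        // are assumed to be CheckedConsumer<Job.Builder, Exception>. The next assertion
        // checks search_count while the datafeed is still STOPPED: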
assertDatafeedStats(datafeedId, DatafeedState.STOPPED, job.getId(), equalTo(0L)); startDatafeed(datafeedId, 0L, now.toEpochMilli()); @@ -166,7 +170,13 @@ public void testDatafeedTimingStats_DatafeedJobIdUpdated() throws Exception { assertDatafeedStats(datafeedId, DatafeedState.STOPPED, job.getId(), greaterThan(0L)); }, 60, TimeUnit.SECONDS); waitUntilJobIsClosed(job.getId()); - } + }; + + openAndRunJob.accept(jobA); + updateDatafeed(new DatafeedUpdate.Builder(datafeedId).setJobId(jobB.getId()).build()); // wipes out timing stats + openAndRunJob.accept(jobB); + updateDatafeed(new DatafeedUpdate.Builder(datafeedId).setJobId(jobA.getId()).build()); // wipes out timing stats + openAndRunJob.accept(jobA); } public void testDatafeedTimingStats_QueryDelayUpdated_TimingStatsNotReset() throws Exception { @@ -177,11 +187,11 @@ public void testDatafeedTimingStats_QueryDelayUpdated_TimingStatsNotReset() thro Instant now = Instant.now(); indexDocs(logger, "data", numDocs, now.minus(Duration.ofDays(14)).toEpochMilli(), now.toEpochMilli()); - Job.Builder job = createScheduledJob("lookback-job"); + Job.Builder job = createScheduledJob("lookback-job-query-delay-updated"); registerJob(job); putJob(job); - String datafeedId = "lookback-datafeed"; + String datafeedId = "lookback-datafeed-query-delay-updated"; DatafeedConfig datafeedConfig = createDatafeed(datafeedId, job.getId(), Arrays.asList("data")); registerDatafeed(datafeedConfig); putDatafeed(datafeedConfig); diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java index 50554fdd05af7..6b7a498c89354 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.ml.integration; +import org.apache.lucene.util.Constants; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -211,6 +212,7 @@ public void testMemoryStatus() throws Exception { } public void testOverflowToDisk() throws Exception { + assumeFalse("https://github.com/elastic/elasticsearch/issues/44609", Constants.WINDOWS); Detector.Builder detector = new Detector.Builder("mean", "value"); detector.setByFieldName("clientIP"); diff --git a/x-pack/plugin/ml/qa/single-node-tests/build.gradle b/x-pack/plugin/ml/qa/single-node-tests/build.gradle index 036b46cb0ca99..c5f14a71f39df 100644 --- a/x-pack/plugin/ml/qa/single-node-tests/build.gradle +++ b/x-pack/plugin/ml/qa/single-node-tests/build.gradle @@ -8,7 +8,7 @@ dependencies { } testClusters.integTest { - distribution = 'DEFAULT' + testDistribution = 'DEFAULT' setting 'xpack.ilm.enabled', 'false' setting 'xpack.security.enabled', 'false' setting 'xpack.license.self_generated.type', 'trial' diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrator.java index 1d230d93792fc..79ff292fb823a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrator.java @@ -294,8 +294,8 @@ public static 
PersistentTasksCustomMetaData rewritePersistentTaskParams(Map unallocatedJobTasks = MlTasks.unallocatedJobTasks(currentTasks, nodes); - Collection unallocatedDatafeedsTasks = + Collection> unallocatedJobTasks = MlTasks.unallocatedJobTasks(currentTasks, nodes); + Collection> unallocatedDatafeedsTasks = MlTasks.unallocatedDatafeedTasks(currentTasks, nodes); if (unallocatedJobTasks.isEmpty() && unallocatedDatafeedsTasks.isEmpty()) { @@ -304,7 +304,7 @@ public static PersistentTasksCustomMetaData rewritePersistentTaskParams(Map jobTask : unallocatedJobTasks) { OpenJobAction.JobParams originalParams = (OpenJobAction.JobParams) jobTask.getParams(); if (originalParams.getJob() == null) { Job job = jobs.get(originalParams.getJobId()); @@ -325,7 +325,7 @@ public static PersistentTasksCustomMetaData rewritePersistentTaskParams(Map datafeedTask : unallocatedDatafeedsTasks) { StartDatafeedAction.DatafeedParams originalParams = (StartDatafeedAction.DatafeedParams) datafeedTask.getParams(); if (originalParams.getJobId() == null) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteCalendarAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteCalendarAction.java index d4cdb48dd5afe..1205590a8095d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteCalendarAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteCalendarAction.java @@ -25,8 +25,6 @@ import org.elasticsearch.xpack.ml.job.JobManager; import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; -import java.util.function.Supplier; - import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; @@ -40,8 +38,7 @@ public class TransportDeleteCalendarAction extends HandledTransportAction) DeleteCalendarAction.Request::new); + super(DeleteCalendarAction.NAME, transportService, actionFilters, DeleteCalendarAction.Request::new); this.client = client; this.jobManager = jobManager; this.jobResultsProvider = jobResultsProvider; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDataFrameAnalyticsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDataFrameAnalyticsAction.java index dcc72bc8c75aa..b727de2e8be3d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDataFrameAnalyticsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDataFrameAnalyticsAction.java @@ -65,11 +65,6 @@ protected String executor() { return ThreadPool.Names.SAME; } - @Override - protected AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected AcknowledgedResponse read(StreamInput in) throws IOException { return new AcknowledgedResponse(in); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDatafeedAction.java index 56c54bbe9547c..cada114b9f038 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDatafeedAction.java @@ -55,7 +55,7 @@ public 
TransportDeleteDatafeedAction(Settings settings, TransportService transpo Client client, PersistentTasksService persistentTasksService, NamedXContentRegistry xContentRegistry) { super(DeleteDatafeedAction.NAME, transportService, clusterService, threadPool, actionFilters, - indexNameExpressionResolver, DeleteDatafeedAction.Request::new); + DeleteDatafeedAction.Request::new, indexNameExpressionResolver); this.client = client; this.datafeedConfigProvider = new DatafeedConfigProvider(client, xContentRegistry); this.persistentTasksService = persistentTasksService; @@ -73,11 +73,6 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { return new AcknowledgedResponse(in); } - @Override - protected AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected void masterOperation(Task task, DeleteDatafeedAction.Request request, ClusterState state, ActionListener listener) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteFilterAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteFilterAction.java index a94436d5c6920..67aa0ed3828a6 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteFilterAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteFilterAction.java @@ -31,7 +31,6 @@ import java.util.ArrayList; import java.util.List; -import java.util.function.Supplier; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; @@ -45,8 +44,7 @@ public class TransportDeleteFilterAction extends HandledTransportAction) DeleteFilterAction.Request::new); + super(DeleteFilterAction.NAME, transportService, actionFilters, DeleteFilterAction.Request::new); this.client = client; this.jobConfigProvider = jobConfigProvider; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java index b40f49523c751..729da576ff4b3 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java @@ -50,11 +50,11 @@ import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.action.util.PageParams; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.DeleteJobAction; import org.elasticsearch.xpack.core.ml.action.GetModelSnapshotsAction; import org.elasticsearch.xpack.core.ml.action.KillProcessAction; -import org.elasticsearch.xpack.core.action.util.PageParams; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; @@ -117,7 +117,7 @@ public TransportDeleteJobAction(Settings settings, TransportService transportSer JobConfigProvider jobConfigProvider, DatafeedConfigProvider datafeedConfigProvider, MlMemoryTracker memoryTracker) { super(DeleteJobAction.NAME, transportService, clusterService, threadPool, actionFilters, - indexNameExpressionResolver, DeleteJobAction.Request::new); + 
DeleteJobAction.Request::new, indexNameExpressionResolver); this.client = client; this.persistentTasksService = persistentTasksService; this.auditor = auditor; @@ -139,11 +139,6 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { return new AcknowledgedResponse(in); } - @Override - protected AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected ClusterBlockException checkBlock(DeleteJobAction.Request request, ClusterState state) { return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFinalizeJobExecutionAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFinalizeJobExecutionAction.java index 24b8998763ec8..e31ff5c26fd59 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFinalizeJobExecutionAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFinalizeJobExecutionAction.java @@ -47,7 +47,7 @@ public TransportFinalizeJobExecutionAction(TransportService transportService, Cl ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Client client) { super(FinalizeJobExecutionAction.NAME, transportService, clusterService, threadPool, actionFilters, - indexNameExpressionResolver, FinalizeJobExecutionAction.Request::new); + FinalizeJobExecutionAction.Request::new, indexNameExpressionResolver); this.client = client; } @@ -61,11 +61,6 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { return new AcknowledgedResponse(in); } - @Override - protected AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected void masterOperation(Task task, FinalizeJobExecutionAction.Request request, ClusterState state, ActionListener listener) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetBucketsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetBucketsAction.java index 0f16b0f92dd13..bd6e5e70a713e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetBucketsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetBucketsAction.java @@ -17,8 +17,6 @@ import org.elasticsearch.xpack.ml.job.persistence.BucketsQueryBuilder; import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; -import java.util.function.Supplier; - public class TransportGetBucketsAction extends HandledTransportAction { private final JobResultsProvider jobResultsProvider; @@ -28,7 +26,7 @@ public class TransportGetBucketsAction extends HandledTransportAction) GetBucketsAction.Request::new); + super(GetBucketsAction.NAME, transportService, actionFilters, GetBucketsAction.Request::new); this.jobResultsProvider = jobResultsProvider; this.jobManager = jobManager; this.client = client; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCalendarEventsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCalendarEventsAction.java index c8b56ed63651c..cac3f8127f167 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCalendarEventsAction.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCalendarEventsAction.java @@ -11,9 +11,9 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.action.util.QueryPage; import org.elasticsearch.xpack.core.ml.action.GetCalendarEventsAction; import org.elasticsearch.xpack.core.ml.action.GetCalendarsAction; -import org.elasticsearch.xpack.core.action.util.QueryPage; import org.elasticsearch.xpack.core.ml.calendars.ScheduledEvent; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; @@ -22,7 +22,6 @@ import org.elasticsearch.xpack.ml.job.persistence.ScheduledEventsQueryBuilder; import java.util.Collections; -import java.util.function.Supplier; public class TransportGetCalendarEventsAction extends HandledTransportAction { @@ -34,8 +33,7 @@ public class TransportGetCalendarEventsAction extends HandledTransportAction) GetCalendarEventsAction.Request::new); + super(GetCalendarEventsAction.NAME, transportService, actionFilters, GetCalendarEventsAction.Request::new); this.jobResultsProvider = jobResultsProvider; this.jobConfigProvider = jobConfigProvider; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCalendarsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCalendarsAction.java index fe2c3817eb2c0..4db204815eb46 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCalendarsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCalendarsAction.java @@ -27,8 +27,7 @@ public class TransportGetCalendarsAction extends HandledTransportAction { private final JobResultsProvider jobResultsProvider; @@ -27,8 +25,7 @@ public class TransportGetCategoriesAction extends HandledTransportAction) GetCategoriesAction.Request::new); + super(GetCategoriesAction.NAME, transportService, actionFilters, GetCategoriesAction.Request::new); this.jobResultsProvider = jobResultsProvider; this.client = client; this.jobManager = jobManager; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsAction.java index 89f14f717064e..c3a0683294437 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsAction.java @@ -15,16 +15,18 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.action.util.QueryPage; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.GetDatafeedsAction; -import org.elasticsearch.xpack.core.action.util.QueryPage; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.ml.datafeed.persistence.DatafeedConfigProvider; +import java.io.IOException; import 
java.util.ArrayList; import java.util.Collections; import java.util.Comparator; @@ -55,8 +57,8 @@ protected String executor() { } @Override - protected GetDatafeedsAction.Response newResponse() { - return new GetDatafeedsAction.Response(); + protected GetDatafeedsAction.Response read(StreamInput in) throws IOException { + return new GetDatafeedsAction.Response(in); } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java index a759757c7d80d..f50ce26132b9f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java @@ -15,19 +15,21 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.action.util.QueryPage; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction; -import org.elasticsearch.xpack.core.action.util.QueryPage; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedTimingStats; import org.elasticsearch.xpack.ml.datafeed.persistence.DatafeedConfigProvider; import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; +import java.io.IOException; import java.util.List; import java.util.stream.Collectors; @@ -54,8 +56,8 @@ protected String executor() { } @Override - protected GetDatafeedsStatsAction.Response newResponse() { - return new GetDatafeedsStatsAction.Response(); + protected GetDatafeedsStatsAction.Response read(StreamInput in) throws IOException { + return new GetDatafeedsStatsAction.Response(in); } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetInfluencersAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetInfluencersAction.java index e0891e85bc7ba..ca139e46965d8 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetInfluencersAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetInfluencersAction.java @@ -17,8 +17,6 @@ import org.elasticsearch.xpack.ml.job.persistence.InfluencersQueryBuilder; import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; -import java.util.function.Supplier; - public class TransportGetInfluencersAction extends HandledTransportAction { private final JobResultsProvider jobResultsProvider; @@ -28,8 +26,7 @@ public class TransportGetInfluencersAction extends HandledTransportAction) GetInfluencersAction.Request::new); + super(GetInfluencersAction.NAME, transportService, actionFilters, GetInfluencersAction.Request::new); this.jobResultsProvider = jobResultsProvider; this.client = client; this.jobManager = jobManager; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsAction.java 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsAction.java index ba81bfac88277..98b7ece2dcb60 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsAction.java @@ -14,12 +14,15 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.action.GetJobsAction; import org.elasticsearch.xpack.ml.job.JobManager; +import java.io.IOException; + public class TransportGetJobsAction extends TransportMasterNodeReadAction { private final JobManager jobManager; @@ -40,8 +43,8 @@ protected String executor() { } @Override - protected GetJobsAction.Response newResponse() { - return new GetJobsAction.Response(); + protected GetJobsAction.Response read(StreamInput in) throws IOException { + return new GetJobsAction.Response(in); } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetOverallBucketsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetOverallBucketsAction.java index e3af3b7ac64ce..8ac9390f8065a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetOverallBucketsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetOverallBucketsAction.java @@ -25,8 +25,8 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.ml.action.GetOverallBucketsAction; import org.elasticsearch.xpack.core.action.util.QueryPage; +import org.elasticsearch.xpack.core.ml.action.GetOverallBucketsAction; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.job.results.Bucket; @@ -43,10 +43,10 @@ import org.elasticsearch.xpack.ml.job.persistence.overallbuckets.OverallBucketsProvider; import org.elasticsearch.xpack.ml.utils.MlIndicesUtils; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; -import java.util.function.Supplier; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; @@ -65,8 +65,7 @@ public class TransportGetOverallBucketsAction extends HandledTransportAction) GetOverallBucketsAction.Request::new); + super(GetOverallBucketsAction.NAME, transportService, actionFilters, GetOverallBucketsAction.Request::new); this.threadPool = threadPool; this.clusterService = clusterService; this.client = client; @@ -79,7 +78,8 @@ protected void doExecute(Task task, GetOverallBucketsAction.Request request, jobManager.expandJobs(request.getJobId(), request.allowNoJobs(), ActionListener.wrap( jobPage -> { if (jobPage.count() == 0) { - listener.onResponse(new GetOverallBucketsAction.Response()); + listener.onResponse(new GetOverallBucketsAction.Response( + new QueryPage<>(Collections.emptyList(), 0, Job.RESULTS_FIELD))); return; } @@ -108,7 +108,7 @@ private void 
getOverallBuckets(GetOverallBucketsAction.Request request, List chunkedBucketSearcherListener = ActionListener.wrap(searcher -> { if (searcher == null) { - listener.onResponse(new GetOverallBucketsAction.Response()); + listener.onResponse(new GetOverallBucketsAction.Response(new QueryPage<>(Collections.emptyList(), 0, Job.RESULTS_FIELD))); return; } searcher.searchAndComputeOverallBuckets(overallBucketsListener); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetRecordsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetRecordsAction.java index d916bfebbc4c3..efbc4107884ae 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetRecordsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetRecordsAction.java @@ -17,8 +17,6 @@ import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; import org.elasticsearch.xpack.ml.job.persistence.RecordsQueryBuilder; -import java.util.function.Supplier; - public class TransportGetRecordsAction extends HandledTransportAction { private final JobResultsProvider jobResultsProvider; @@ -28,7 +26,7 @@ public class TransportGetRecordsAction extends HandledTransportAction) GetRecordsAction.Request::new); + super(GetRecordsAction.NAME, transportService, actionFilters, GetRecordsAction.Request::new); this.jobResultsProvider = jobResultsProvider; this.jobManager = jobManager; this.client = client; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportMlInfoAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportMlInfoAction.java index 58bb42a5e3e79..0c76d62ced94a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportMlInfoAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportMlInfoAction.java @@ -25,7 +25,6 @@ import java.util.HashMap; import java.util.Map; import java.util.concurrent.TimeoutException; -import java.util.function.Supplier; public class TransportMlInfoAction extends HandledTransportAction { @@ -35,8 +34,7 @@ public class TransportMlInfoAction extends HandledTransportAction) MlInfoAction.Request::new); - + super(MlInfoAction.NAME, transportService, actionFilters, MlInfoAction.Request::new); this.clusterService = clusterService; try { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java index 1be58fa6998ff..b2f94020fbb9b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java @@ -96,8 +96,8 @@ public TransportOpenJobAction(Settings settings, TransportService transportServi PersistentTasksService persistentTasksService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, JobConfigProvider jobConfigProvider, MlMemoryTracker memoryTracker) { - super(OpenJobAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, - OpenJobAction.Request::new); + super(OpenJobAction.NAME, transportService, clusterService, threadPool, actionFilters,OpenJobAction.Request::new, + indexNameExpressionResolver); this.licenseState = licenseState; this.persistentTasksService = persistentTasksService; 
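        // Editor's note: throughout this PR the TransportMasterNodeAction super-call
        // now passes the request reader (here OpenJobAction.Request::new) before the
        // IndexNameExpressionResolver; the reorder appears to track a base-class
        // constructor change accompanying the Streamable-to-Writeable migration seen
        // in the removed newResponse() overrides.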
this.jobConfigProvider = jobConfigProvider; @@ -128,9 +128,11 @@ static void validate(String jobId, Job job) { static String[] indicesOfInterest(String resultsIndex) { if (resultsIndex == null) { - return new String[]{AnomalyDetectorsIndex.jobStateIndexPattern(), MlMetaIndex.INDEX_NAME}; + return new String[]{AnomalyDetectorsIndex.jobStateIndexPattern(), MlMetaIndex.INDEX_NAME, + AnomalyDetectorsIndex.configIndexName()}; } - return new String[]{AnomalyDetectorsIndex.jobStateIndexPattern(), resultsIndex, MlMetaIndex.INDEX_NAME}; + return new String[]{AnomalyDetectorsIndex.jobStateIndexPattern(), resultsIndex, MlMetaIndex.INDEX_NAME, + AnomalyDetectorsIndex.configIndexName()}; } static List verifyIndicesPrimaryShardsAreActive(String resultsWriteIndex, ClusterState clusterState) { @@ -195,11 +197,6 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { return new AcknowledgedResponse(in); } - @Override - protected AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected ClusterBlockException checkBlock(OpenJobAction.Request request, ClusterState state) { // We only delegate here to PersistentTasksService, but if there is a metadata writeblock, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java index 7e76fc0c7cecb..d3ba2db506a95 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java @@ -33,7 +33,6 @@ import java.nio.charset.StandardCharsets; import java.util.Map; import java.util.Optional; -import java.util.function.Supplier; import java.util.stream.Collectors; public class TransportPreviewDatafeedAction extends HandledTransportAction { @@ -51,8 +50,7 @@ public TransportPreviewDatafeedAction(ThreadPool threadPool, TransportService tr ActionFilters actionFilters, Client client, JobConfigProvider jobConfigProvider, DatafeedConfigProvider datafeedConfigProvider, JobResultsProvider jobResultsProvider, JobResultsPersister jobResultsPersister, NamedXContentRegistry xContentRegistry) { - super(PreviewDatafeedAction.NAME, transportService, actionFilters, - (Supplier) PreviewDatafeedAction.Request::new); + super(PreviewDatafeedAction.NAME, transportService, actionFilters, PreviewDatafeedAction.Request::new); this.threadPool = threadPool; this.client = client; this.jobConfigProvider = jobConfigProvider; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutCalendarAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutCalendarAction.java index 7ff04f5ed018a..d80a1169185e9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutCalendarAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutCalendarAction.java @@ -29,7 +29,6 @@ import java.io.IOException; import java.util.Collections; -import java.util.function.Supplier; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; @@ -40,8 +39,7 @@ public class TransportPutCalendarAction extends HandledTransportAction) PutCalendarAction.Request::new); + 
super(PutCalendarAction.NAME, transportService, actionFilters, PutCalendarAction.Request::new); this.client = client; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDataFrameAnalyticsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDataFrameAnalyticsAction.java index d8f5dbb469f5f..c70093bd885f7 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDataFrameAnalyticsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDataFrameAnalyticsAction.java @@ -5,11 +5,15 @@ */ package org.elasticsearch.xpack.ml.action; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; @@ -29,6 +33,8 @@ import org.elasticsearch.xpack.core.ml.action.PutDataFrameAnalyticsAction; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; +import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.MlStrings; import org.elasticsearch.xpack.core.security.SecurityContext; @@ -43,12 +49,14 @@ import java.io.IOException; import java.time.Instant; +import java.util.Map; import java.util.Objects; -import java.util.function.Supplier; public class TransportPutDataFrameAnalyticsAction extends HandledTransportAction { + private static final Logger logger = LogManager.getLogger(TransportPutDataFrameAnalyticsAction.class); + private final XPackLicenseState licenseState; private final DataFrameAnalyticsConfigProvider configProvider; private final ThreadPool threadPool; @@ -64,8 +72,7 @@ public TransportPutDataFrameAnalyticsAction(Settings settings, TransportService XPackLicenseState licenseState, Client client, ThreadPool threadPool, ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, DataFrameAnalyticsConfigProvider configProvider) { - super(PutDataFrameAnalyticsAction.NAME, transportService, actionFilters, - (Supplier) PutDataFrameAnalyticsAction.Request::new); + super(PutDataFrameAnalyticsAction.NAME, transportService, actionFilters, PutDataFrameAnalyticsAction.Request::new); this.licenseState = licenseState; this.configProvider = configProvider; this.threadPool = threadPool; @@ -97,6 +104,7 @@ protected void doExecute(Task task, PutDataFrameAnalyticsAction.Request request, .setCreateTime(Instant.now()) .setVersion(Version.CURRENT) .build(); + if (licenseState.isAuthAllowed()) { final String username = securityContext.getUser().principal(); RoleDescriptor.IndicesPrivileges sourceIndexPrivileges = RoleDescriptor.IndicesPrivileges.builder() @@ -120,9 +128,12 @@ protected void doExecute(Task task, PutDataFrameAnalyticsAction.Request request, client.execute(HasPrivilegesAction.INSTANCE, privRequest, privResponseListener); 
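            // Editor's note: with security enabled, the analytics config is stored only
            // after HasPrivilegesAction confirms the caller may access the source and
            // dest indices; privResponseListener feeds handlePrivsResponse(...), which
            // either proceeds to updateDocMappingAndPutConfig(...) or fails the request
            // with the insufficient-privileges payload built below.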
} else { - configProvider.put(memoryCappedConfig, threadPool.getThreadContext().getHeaders(), ActionListener.wrap( - indexResponse -> listener.onResponse(new PutDataFrameAnalyticsAction.Response(memoryCappedConfig)), - listener::onFailure + updateDocMappingAndPutConfig( + memoryCappedConfig, + threadPool.getThreadContext().getHeaders(), + ActionListener.wrap( + indexResponse -> listener.onResponse(new PutDataFrameAnalyticsAction.Response(memoryCappedConfig)), + listener::onFailure )); } } @@ -131,9 +142,12 @@ private void handlePrivsResponse(String username, DataFrameAnalyticsConfig memor HasPrivilegesResponse response, ActionListener listener) throws IOException { if (response.isCompleteMatch()) { - configProvider.put(memoryCappedConfig, threadPool.getThreadContext().getHeaders(), ActionListener.wrap( - indexResponse -> listener.onResponse(new PutDataFrameAnalyticsAction.Response(memoryCappedConfig)), - listener::onFailure + updateDocMappingAndPutConfig( + memoryCappedConfig, + threadPool.getThreadContext().getHeaders(), + ActionListener.wrap( + indexResponse -> listener.onResponse(new PutDataFrameAnalyticsAction.Response(memoryCappedConfig)), + listener::onFailure )); } else { XContentBuilder builder = JsonXContent.contentBuilder(); @@ -150,6 +164,25 @@ private void handlePrivsResponse(String username, DataFrameAnalyticsConfig memor } } + private void updateDocMappingAndPutConfig(DataFrameAnalyticsConfig config, + Map headers, + ActionListener listener) { + ClusterState clusterState = clusterService.state(); + if (clusterState == null) { + logger.warn("Cannot update doc mapping because clusterState == null"); + configProvider.put(config, headers, listener); + return; + } + ElasticsearchMappings.addDocMappingIfMissing( + AnomalyDetectorsIndex.configIndexName(), + ElasticsearchMappings::configMapping, + client, + clusterState, + ActionListener.wrap( + unused -> configProvider.put(config, headers, listener), + listener::onFailure)); + } + private void validateConfig(DataFrameAnalyticsConfig config) { if (MlStrings.isValidId(config.getId()) == false) { throw ExceptionsHelper.badRequestException(Messages.getMessage(Messages.INVALID_ID, DataFrameAnalyticsConfig.ID, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java index 5b8e91cc65280..20bf20a65d3f8 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java @@ -5,6 +5,8 @@ */ package org.elasticsearch.xpack.ml.action; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchAction; @@ -20,6 +22,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -36,6 +39,8 @@ import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.PutDatafeedAction; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import 
org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; +import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.rollup.action.GetRollupIndexCapsAction; import org.elasticsearch.xpack.core.rollup.action.RollupSearchAction; @@ -58,6 +63,8 @@ public class TransportPutDatafeedAction extends TransportMasterNodeAction { + private static final Logger logger = LogManager.getLogger(TransportPutDatafeedAction.class); + private final XPackLicenseState licenseState; private final Client client; private final SecurityContext securityContext; @@ -72,7 +79,7 @@ public TransportPutDatafeedAction(Settings settings, TransportService transportS IndexNameExpressionResolver indexNameExpressionResolver, NamedXContentRegistry xContentRegistry) { super(PutDatafeedAction.NAME, transportService, clusterService, threadPool, - actionFilters, indexNameExpressionResolver, PutDatafeedAction.Request::new); + actionFilters, PutDatafeedAction.Request::new, indexNameExpressionResolver); this.licenseState = licenseState; this.client = client; this.securityContext = XPackSettings.SECURITY_ENABLED.get(settings) ? @@ -88,8 +95,8 @@ protected String executor() { } @Override - protected PutDatafeedAction.Response newResponse() { - return new PutDatafeedAction.Response(); + protected PutDatafeedAction.Response read(StreamInput in) throws IOException { + return new PutDatafeedAction.Response(in); } @Override @@ -111,7 +118,7 @@ protected void masterOperation(Task task, PutDatafeedAction.Request request, Clu .indices(indices); ActionListener privResponseListener = ActionListener.wrap( - r -> handlePrivsResponse(username, request, r, listener), + r -> handlePrivsResponse(username, request, r, state, listener), listener::onFailure); ActionListener getRollupIndexCapsActionHandler = ActionListener.wrap( @@ -145,15 +152,17 @@ protected void masterOperation(Task task, PutDatafeedAction.Request request, Clu } } else { - putDatafeed(request, threadPool.getThreadContext().getHeaders(), listener); + putDatafeed(request, threadPool.getThreadContext().getHeaders(), state, listener); } } - private void handlePrivsResponse(String username, PutDatafeedAction.Request request, + private void handlePrivsResponse(String username, + PutDatafeedAction.Request request, HasPrivilegesResponse response, + ClusterState clusterState, ActionListener listener) throws IOException { if (response.isCompleteMatch()) { - putDatafeed(request, threadPool.getThreadContext().getHeaders(), listener); + putDatafeed(request, threadPool.getThreadContext().getHeaders(), clusterState, listener); } else { XContentBuilder builder = JsonXContent.contentBuilder(); builder.startObject(); @@ -169,7 +178,9 @@ private void handlePrivsResponse(String username, PutDatafeedAction.Request requ } } - private void putDatafeed(PutDatafeedAction.Request request, Map headers, + private void putDatafeed(PutDatafeedAction.Request request, + Map headers, + ClusterState clusterState, ActionListener listener) { String datafeedId = request.getDatafeed().getId(); @@ -181,13 +192,30 @@ private void putDatafeed(PutDatafeedAction.Request request, Map } DatafeedConfig.validateAggregations(request.getDatafeed().getParsedAggregations(xContentRegistry)); - CheckedConsumer validationOk = ok -> { - datafeedConfigProvider.putDatafeedConfig(request.getDatafeed(), headers, ActionListener.wrap( + CheckedConsumer mappingsUpdated = ok -> { + 
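+            // Editor's note: this callback runs only once the config index mappings are
+            // confirmed up to date (see validationOk below); its type parameters are
+            // stripped in this rendering and are assumed to be
+            // CheckedConsumer<Boolean, Exception>.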
datafeedConfigProvider.putDatafeedConfig( + request.getDatafeed(), + headers, + ActionListener.wrap( indexResponse -> listener.onResponse(new PutDatafeedAction.Response(request.getDatafeed())), listener::onFailure )); }; + CheckedConsumer validationOk = ok -> { + if (clusterState == null) { + logger.warn("Cannot update doc mapping because clusterState == null"); + mappingsUpdated.accept(false); + return; + } + ElasticsearchMappings.addDocMappingIfMissing( + AnomalyDetectorsIndex.configIndexName(), + ElasticsearchMappings::configMapping, + client, + clusterState, + ActionListener.wrap(mappingsUpdated, listener::onFailure)); + }; + CheckedConsumer jobOk = ok -> jobConfigProvider.validateDatafeedJob(request.getDatafeed(), ActionListener.wrap(validationOk, listener::onFailure)); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutFilterAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutFilterAction.java index 0c1f37f4256cb..b9c8afa93400f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutFilterAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutFilterAction.java @@ -30,7 +30,6 @@ import java.io.IOException; import java.util.Collections; -import java.util.function.Supplier; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; @@ -41,7 +40,7 @@ public class TransportPutFilterAction extends HandledTransportAction) PutFilterAction.Request::new); + super(PutFilterAction.NAME, transportService, actionFilters, PutFilterAction.Request::new); this.client = client; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutJobAction.java index 5e17a818b98f6..aefbb9538bd54 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutJobAction.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; @@ -24,6 +25,8 @@ import org.elasticsearch.xpack.core.ml.action.PutJobAction; import org.elasticsearch.xpack.ml.job.JobManager; +import java.io.IOException; + public class TransportPutJobAction extends TransportMasterNodeAction { private final JobManager jobManager; @@ -35,8 +38,8 @@ public TransportPutJobAction(TransportService transportService, ClusterService c ThreadPool threadPool, XPackLicenseState licenseState, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, JobManager jobManager, AnalysisRegistry analysisRegistry) { - super(PutJobAction.NAME, transportService, clusterService, threadPool, actionFilters, - indexNameExpressionResolver, PutJobAction.Request::new); + super(PutJobAction.NAME, transportService, clusterService, threadPool, actionFilters, PutJobAction.Request::new, + indexNameExpressionResolver); this.licenseState = licenseState; this.jobManager = jobManager; this.analysisRegistry = analysisRegistry; @@ -48,8 
+51,8 @@ protected String executor() { } @Override - protected PutJobAction.Response newResponse() { - return new PutJobAction.Response(); + protected PutJobAction.Response read(StreamInput in) throws IOException { + return new PutJobAction.Response(in); } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportRevertModelSnapshotAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportRevertModelSnapshotAction.java index 5a02cb165d9a0..1a5ba7629ca8e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportRevertModelSnapshotAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportRevertModelSnapshotAction.java @@ -16,6 +16,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.tasks.Task; @@ -34,6 +35,7 @@ import org.elasticsearch.xpack.ml.job.persistence.JobDataDeleter; import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; +import java.io.IOException; import java.util.Date; import java.util.function.Consumer; @@ -52,7 +54,7 @@ public TransportRevertModelSnapshotAction(Settings settings, ThreadPool threadPo JobManager jobManager, JobResultsProvider jobResultsProvider, ClusterService clusterService, Client client, JobDataCountsPersister jobDataCountsPersister) { super(RevertModelSnapshotAction.NAME, transportService, clusterService, threadPool, actionFilters, - indexNameExpressionResolver, RevertModelSnapshotAction.Request::new); + RevertModelSnapshotAction.Request::new, indexNameExpressionResolver); this.client = client; this.jobManager = jobManager; this.jobResultsProvider = jobResultsProvider; @@ -66,8 +68,8 @@ protected String executor() { } @Override - protected RevertModelSnapshotAction.Response newResponse() { - return new RevertModelSnapshotAction.Response(); + protected RevertModelSnapshotAction.Response read(StreamInput in) throws IOException { + return new RevertModelSnapshotAction.Response(in); } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java index 5d5bf5efb8d02..b8bf4961700be 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java @@ -67,8 +67,8 @@ public TransportSetUpgradeModeAction(TransportService transportService, ThreadPo PersistentTasksClusterService persistentTasksClusterService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Client client, PersistentTasksService persistentTasksService) { - super(SetUpgradeModeAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, - SetUpgradeModeAction.Request::new); + super(SetUpgradeModeAction.NAME, transportService, clusterService, threadPool, actionFilters, SetUpgradeModeAction.Request::new, + indexNameExpressionResolver); this.persistentTasksClusterService = persistentTasksClusterService; this.clusterService = clusterService; this.client = 
client; @@ -85,11 +85,6 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { return new AcknowledgedResponse(in); } - @Override - protected AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected void masterOperation(Task task, SetUpgradeModeAction.Request request, ClusterState state, ActionListener<AcknowledgedResponse> listener) throws Exception { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsAction.java index 1d878adbc1d11..b11b7236ca5d8 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsAction.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.client.Client; @@ -23,6 +24,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.inject.Inject; @@ -49,6 +51,7 @@ import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsState; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsTaskState; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.dataframe.DataFrameAnalyticsManager; @@ -60,6 +63,8 @@ import org.elasticsearch.xpack.ml.process.MlMemoryTracker; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import java.util.Map; import java.util.Objects; import java.util.concurrent.atomic.AtomicReference; @@ -106,11 +111,6 @@ protected String executor() { return ThreadPool.Names.SAME; } - @Override - protected AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected AcknowledgedResponse read(StreamInput in) throws IOException { return new AcknowledgedResponse(in); @@ -375,6 +375,19 @@ private void cancelReindexingTask(String reason, TimeValue timeout) { } } + static List<String> verifyIndicesPrimaryShardsAreActive(ClusterState clusterState, String... indexNames) { + IndexNameExpressionResolver resolver = new IndexNameExpressionResolver(); + String[] concreteIndices = resolver.concreteIndexNames(clusterState, IndicesOptions.lenientExpandOpen(), indexNames); + List<String> unavailableIndices = new ArrayList<>(concreteIndices.length); + for (String index : concreteIndices) { + IndexRoutingTable routingTable = clusterState.getRoutingTable().index(index); + if (routingTable == null || routingTable.allPrimaryShardsActive() == false) { + unavailableIndices.add(index); + } + } + return unavailableIndices; + } + public static class TaskExecutor extends PersistentTasksExecutor<StartDataFrameAnalyticsAction.TaskParams> { private final Client client; @@ -422,11 +435,20 @@ public PersistentTasksCustomMetaData.Assignment getAssignment(StartDataFrameAnal String id = params.getId(); + List<String> unavailableIndices = verifyIndicesPrimaryShardsAreActive(clusterState, AnomalyDetectorsIndex.configIndexName()); + if (unavailableIndices.size() != 0) { + String reason = "Not opening data frame analytics job [" + id + + "], because not all primary shards are active for the following indices [" + String.join(",", unavailableIndices) + "]"; + LOGGER.debug(reason); + return new PersistentTasksCustomMetaData.Assignment(null, reason); + } + boolean isMemoryTrackerRecentlyRefreshed = memoryTracker.isRecentlyRefreshed(); if (isMemoryTrackerRecentlyRefreshed == false) { boolean scheduledRefresh = memoryTracker.asyncRefresh(); if (scheduledRefresh) { - String reason = "Not opening job [" + id + "] because job memory requirements are stale - refresh requested"; + String reason = "Not opening data frame analytics job [" + id + + "] because job memory requirements are stale - refresh requested"; LOGGER.debug(reason); return new PersistentTasksCustomMetaData.Assignment(null, reason); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java index fceabfa9e99e7..3d310423866d5 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java @@ -95,8 +95,8 @@ public TransportStartDatafeedAction(Settings settings, TransportService transpor ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Client client, JobConfigProvider jobConfigProvider, DatafeedConfigProvider datafeedConfigProvider, JobResultsPersister jobResultsPersister, Auditor auditor, NamedXContentRegistry xContentRegistry) { - super(StartDatafeedAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, - StartDatafeedAction.Request::new); + super(StartDatafeedAction.NAME, transportService, clusterService, threadPool, actionFilters, StartDatafeedAction.Request::new, + indexNameExpressionResolver); this.licenseState = licenseState; this.persistentTasksService = persistentTasksService; this.client = client; @@ -146,11 +146,6 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { return new AcknowledgedResponse(in); } - @Override - protected AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected void masterOperation(Task task, StartDatafeedAction.Request request, ClusterState state, ActionListener<AcknowledgedResponse> listener) { @@ -256,7 +251,7 @@ private void createDataExtractor(Job job,
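verifyIndicesPrimaryShardsAreActive resolves names with IndicesOptions.lenientExpandOpen(), so a name or pattern that matches no index resolves to nothing instead of throwing; only indices that exist but whose primaries are not all active come back. A hedged usage sketch (the index names are illustrative):

List<String> unavailable = verifyIndicesPrimaryShardsAreActive(clusterState,
        ".ml-config", "pattern-matching-nothing-*");
// The unmatched pattern is silently dropped under lenient options, so only
// .ml-config can appear in the result; an empty list means all primaries are
// active and the persistent task may be assigned, as getAssignment does above.
if (unavailable.isEmpty() == false) {
    // return an unassigned Assignment carrying a human-readable reason
}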
DatafeedConfig datafeed, StartDatafeed datafeed, job, xContentRegistry, - // Creating fake {@link TimingStatsReporter} so that search API call is not needed. + // Creating fake DatafeedTimingStatsReporter so that search API call is not needed. new DatafeedTimingStatsReporter(new DatafeedTimingStats(job.getId()), jobResultsPersister), ActionListener.wrap( unused -> diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateDatafeedAction.java index a8fdc43642014..6e14d1a5f4819 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateDatafeedAction.java @@ -16,6 +16,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.index.reindex.BulkByScrollResponse; @@ -34,10 +35,12 @@ import org.elasticsearch.xpack.ml.job.persistence.JobConfigProvider; import org.elasticsearch.xpack.ml.job.persistence.JobDataDeleter; +import java.io.IOException; import java.util.Collections; import java.util.Map; -public class TransportUpdateDatafeedAction extends TransportMasterNodeAction { +public class TransportUpdateDatafeedAction extends + TransportMasterNodeAction { private final Client client; private final DatafeedConfigProvider datafeedConfigProvider; @@ -49,8 +52,8 @@ public TransportUpdateDatafeedAction(Settings settings, TransportService transpo ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Client client, NamedXContentRegistry xContentRegistry) { - super(UpdateDatafeedAction.NAME, transportService, clusterService, threadPool, actionFilters, - indexNameExpressionResolver, UpdateDatafeedAction.Request::new); + super(UpdateDatafeedAction.NAME, transportService, clusterService, threadPool, actionFilters, UpdateDatafeedAction.Request::new, + indexNameExpressionResolver); this.client = client; this.datafeedConfigProvider = new DatafeedConfigProvider(client, xContentRegistry); @@ -64,8 +67,8 @@ protected String executor() { } @Override - protected PutDatafeedAction.Response newResponse() { - return new PutDatafeedAction.Response(); + protected PutDatafeedAction.Response read(StreamInput in) throws IOException { + return new PutDatafeedAction.Response(in); } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateFilterAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateFilterAction.java index 1fe298fe8cc51..49d8388669100 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateFilterAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateFilterAction.java @@ -44,7 +44,6 @@ import java.util.Collections; import java.util.SortedSet; import java.util.TreeSet; -import java.util.function.Supplier; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; @@ -57,8 +56,7 @@ public class TransportUpdateFilterAction extends 
HandledTransportAction) UpdateFilterAction.Request::new); + super(UpdateFilterAction.NAME, transportService, actionFilters, UpdateFilterAction.Request::new); this.client = client; this.jobManager = jobManager; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateJobAction.java index 5505669d9ea4e..e1666d11236fa 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateJobAction.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -21,6 +22,8 @@ import org.elasticsearch.xpack.core.ml.action.UpdateJobAction; import org.elasticsearch.xpack.ml.job.JobManager; +import java.io.IOException; + public class TransportUpdateJobAction extends TransportMasterNodeAction { private final JobManager jobManager; @@ -29,8 +32,8 @@ public class TransportUpdateJobAction extends TransportMasterNodeAction applyUpdate(UpdateModelSnapshotAction.Reque if (request.getRetain() != null) { updatedSnapshotBuilder.setRetain(request.getRetain()); } - return new Result(target.index, updatedSnapshotBuilder.build()); + return new Result<>(target.index, updatedSnapshotBuilder.build()); } private void indexModelSnapshot(Result modelSnapshot, Consumer handler, Consumer errorHandler) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportValidateDetectorAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportValidateDetectorAction.java index 197f9e3284823..d2904f8da75e5 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportValidateDetectorAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportValidateDetectorAction.java @@ -14,14 +14,11 @@ import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.action.ValidateDetectorAction; -import java.util.function.Supplier; - public class TransportValidateDetectorAction extends HandledTransportAction { @Inject public TransportValidateDetectorAction(TransportService transportService, ActionFilters actionFilters) { - super(ValidateDetectorAction.NAME, transportService, actionFilters, - (Supplier) ValidateDetectorAction.Request::new); + super(ValidateDetectorAction.NAME, transportService, actionFilters, ValidateDetectorAction.Request::new); } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportValidateJobConfigAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportValidateJobConfigAction.java index 7311ab2502f25..313edb22c4794 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportValidateJobConfigAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportValidateJobConfigAction.java @@ -14,14 +14,11 @@ import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.action.ValidateJobConfigAction; -import java.util.function.Supplier; - public class 
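The constructor cleanups in TransportPutFilterAction, TransportUpdateFilterAction and TransportValidateDetectorAction all delete a (Supplier<...Request>) cast. During the Streamable-to-Writeable migration, HandledTransportAction had overloads taking both Supplier<Request> and Writeable.Reader<Request>, and Request::new could match either, so the cast disambiguated; once only the reader overload remains, the bare method reference compiles. A minimal sketch with a hypothetical ExampleRequest:

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.Writeable;

// ExampleRequest declares ExampleRequest(StreamInput in), so the constructor
// reference satisfies Writeable.Reader<ExampleRequest> with no cast needed.
Writeable.Reader<ExampleRequest> reader = ExampleRequest::new;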
TransportValidateJobConfigAction extends HandledTransportAction { @Inject public TransportValidateJobConfigAction(TransportService transportService, ActionFilters actionFilters) { - super(ValidateJobConfigAction.NAME, transportService, actionFilters, - (Supplier< ValidateJobConfigAction.Request>) ValidateJobConfigAction.Request::new); + super(ValidateJobConfigAction.NAME, transportService, actionFilters, ValidateJobConfigAction.Request::new); } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java index 4a9e4fd41d9c2..18d724313e325 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java @@ -61,6 +61,7 @@ class DatafeedJob { private final long queryDelayMs; private final Client client; private final DataExtractorFactory dataExtractorFactory; + private final DatafeedTimingStatsReporter timingStatsReporter; private final Supplier currentTimeSupplier; private final DelayedDataDetector delayedDataDetector; @@ -74,13 +75,15 @@ class DatafeedJob { private volatile boolean isIsolated; DatafeedJob(String jobId, DataDescription dataDescription, long frequencyMs, long queryDelayMs, - DataExtractorFactory dataExtractorFactory, Client client, Auditor auditor, Supplier currentTimeSupplier, - DelayedDataDetector delayedDataDetector, long latestFinalBucketEndTimeMs, long latestRecordTimeMs) { + DataExtractorFactory dataExtractorFactory, DatafeedTimingStatsReporter timingStatsReporter, Client client, + Auditor auditor, Supplier currentTimeSupplier, DelayedDataDetector delayedDataDetector, + long latestFinalBucketEndTimeMs, long latestRecordTimeMs) { this.jobId = jobId; this.dataDescription = Objects.requireNonNull(dataDescription); this.frequencyMs = frequencyMs; this.queryDelayMs = queryDelayMs; this.dataExtractorFactory = dataExtractorFactory; + this.timingStatsReporter = timingStatsReporter; this.client = client; this.auditor = auditor; this.currentTimeSupplier = currentTimeSupplier; @@ -350,6 +353,7 @@ private void run(long start, long end, FlushJobAction.Request flushRequest) thro try (InputStream in = extractedData.get()) { counts = postData(in, XContentType.JSON); LOGGER.trace("[{}] Processed another {} records", jobId, counts.getProcessedRecordCount()); + timingStatsReporter.reportDataCounts(counts); } catch (Exception e) { if (e instanceof InterruptedException) { Thread.currentThread().interrupt(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilder.java index 0689b9774b56a..778e211640279 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilder.java @@ -69,10 +69,20 @@ void build(String datafeedId, ActionListener listener) { TimeValue queryDelay = datafeedConfigHolder.get().getQueryDelay(); DelayedDataDetector delayedDataDetector = DelayedDataDetectorFactory.buildDetector(jobHolder.get(), datafeedConfigHolder.get(), client, xContentRegistry); - DatafeedJob datafeedJob = new DatafeedJob(jobHolder.get().getId(), buildDataDescription(jobHolder.get()), - frequency.millis(), queryDelay.millis(), - context.dataExtractorFactory, client, auditor, currentTimeSupplier, 
delayedDataDetector, - context.latestFinalBucketEndMs, context.latestRecordTimeMs); + DatafeedJob datafeedJob = + new DatafeedJob( + jobHolder.get().getId(), + buildDataDescription(jobHolder.get()), + frequency.millis(), + queryDelay.millis(), + context.dataExtractorFactory, + context.timingStatsReporter, + client, + auditor, + currentTimeSupplier, + delayedDataDetector, + context.latestFinalBucketEndMs, + context.latestRecordTimeMs); listener.onResponse(datafeedJob); }; @@ -92,12 +102,13 @@ void build(String datafeedId, ActionListener listener) { // Create data extractor factory Consumer datafeedTimingStatsHandler = timingStats -> { + context.timingStatsReporter = new DatafeedTimingStatsReporter(timingStats, jobResultsPersister); DataExtractorFactory.create( client, datafeedConfigHolder.get(), jobHolder.get(), xContentRegistry, - new DatafeedTimingStatsReporter(timingStats, jobResultsPersister), + context.timingStatsReporter, dataExtractorFactoryHandler); }; @@ -189,5 +200,6 @@ private static class Context { volatile long latestFinalBucketEndMs = -1L; volatile long latestRecordTimeMs = -1L; volatile DataExtractorFactory dataExtractorFactory; + volatile DatafeedTimingStatsReporter timingStatsReporter; } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedTimingStatsReporter.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedTimingStatsReporter.java index 260db421e0446..202df616036f7 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedTimingStatsReporter.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedTimingStatsReporter.java @@ -8,6 +8,7 @@ import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedTimingStats; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; import org.elasticsearch.xpack.ml.job.persistence.JobResultsPersister; import java.util.Objects; @@ -46,32 +47,65 @@ public void reportSearchDuration(TimeValue searchDuration) { return; } currentTimingStats.incrementTotalSearchTimeMs(searchDuration.millis()); + flushIfDifferSignificantly(); + } + + /** + * Reports the data counts received from the autodetect process. + */ + public void reportDataCounts(DataCounts dataCounts) { + if (dataCounts == null) { + return; + } + currentTimingStats.setBucketCount(dataCounts.getBucketCount()); + flushIfDifferSignificantly(); + } + + private void flushIfDifferSignificantly() { if (differSignificantly(currentTimingStats, persistedTimingStats)) { - // TODO: Consider changing refresh policy to NONE here and only do IMMEDIATE on datafeed _stop action - flush(WriteRequest.RefreshPolicy.IMMEDIATE); + flush(); } } - private void flush(WriteRequest.RefreshPolicy refreshPolicy) { + private void flush() { persistedTimingStats = new DatafeedTimingStats(currentTimingStats); - jobResultsPersister.persistDatafeedTimingStats(persistedTimingStats, refreshPolicy); + // TODO: Consider changing refresh policy to NONE here and only do IMMEDIATE on datafeed _stop action + jobResultsPersister.persistDatafeedTimingStats(persistedTimingStats, WriteRequest.RefreshPolicy.IMMEDIATE); } /** * Returns true if given stats objects differ from each other by more than 10% for at least one of the statistics. 
*/ public static boolean differSignificantly(DatafeedTimingStats stats1, DatafeedTimingStats stats2) { - return differSignificantly(stats1.getTotalSearchTimeMs(), stats2.getTotalSearchTimeMs()); + return countsDifferSignificantly(stats1.getSearchCount(), stats2.getSearchCount()) + || differSignificantly(stats1.getTotalSearchTimeMs(), stats2.getTotalSearchTimeMs()) + || differSignificantly(stats1.getAvgSearchTimePerBucketMs(), stats2.getAvgSearchTimePerBucketMs()); } /** * Returns {@code true} if one of the ratios { value1 / value2, value2 / value1 } is smaller than MIN_VALID_RATIO. * This can be interpreted as values { value1, value2 } differing significantly from each other. */ - private static boolean differSignificantly(double value1, double value2) { - return (value2 / value1 < MIN_VALID_RATIO) - || (value1 / value2 < MIN_VALID_RATIO) - || Math.abs(value1 - value2) > MAX_VALID_ABS_DIFFERENCE_MS; + private static boolean countsDifferSignificantly(long value1, long value2) { + return (((double) value2) / value1 < MIN_VALID_RATIO) + || (((double) value1) / value2 < MIN_VALID_RATIO); + } + + /** + * Returns {@code true} if one of the ratios { value1 / value2, value2 / value1 } is smaller than MIN_VALID_RATIO or + * the absolute difference |value1 - value2| is greater than MAX_VALID_ABS_DIFFERENCE_MS. + * This can be interpreted as values { value1, value2 } differing significantly from each other. + * This method also returns: + * - {@code true} in case one value is {@code null} while the other is not. + * - {@code false} in case both values are {@code null}. + */ + private static boolean differSignificantly(Double value1, Double value2) { + if (value1 != null && value2 != null) { + return (value2 / value1 < MIN_VALID_RATIO) + || (value1 / value2 < MIN_VALID_RATIO) + || Math.abs(value1 - value2) > MAX_VALID_ABS_DIFFERENCE_MS; + } + return (value1 != null) || (value2 != null); } /** diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/RollupDataExtractorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/RollupDataExtractorFactory.java index 6c27fba4156f6..94d0c6204ce00 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/RollupDataExtractorFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/RollupDataExtractorFactory.java @@ -117,7 +117,7 @@ public static void create(Client client, ); return; } - final List<ValuesSourceAggregationBuilder> flattenedAggs = new ArrayList<>(); + final List<ValuesSourceAggregationBuilder<?>> flattenedAggs = new ArrayList<>(); flattenAggregations(datafeed.getParsedAggregations(xContentRegistry) .getAggregatorFactories(), datafeedHistogramAggregation, flattenedAggs); @@ -148,7 +148,7 @@ private static boolean validInterval(long datafeedInterval, ParsedRollupCaps rol private static void flattenAggregations(final Collection<AggregationBuilder> datafeedAggregations, final AggregationBuilder datafeedHistogramAggregation, - final List<ValuesSourceAggregationBuilder> flattenedAggregations) { + final List<ValuesSourceAggregationBuilder<?>> flattenedAggregations) { for (AggregationBuilder aggregationBuilder : datafeedAggregations) { if (aggregationBuilder.equals(datafeedHistogramAggregation) == false) { flattenedAggregations.add((ValuesSourceAggregationBuilder)aggregationBuilder); @@ -157,8 +157,8 @@ private static void flattenAggregations(final Collection<AggregationBuilder> dat } } - private static boolean hasAggregations(ParsedRollupCaps rollupCaps, List<ValuesSourceAggregationBuilder> datafeedAggregations) { - for (ValuesSourceAggregationBuilder aggregationBuilder :
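Worked example for the thresholds above, assuming MIN_VALID_RATIO = 0.9 and MAX_VALID_ABS_DIFFERENCE_MS = 10000 (values consistent with the reporter tests later in this diff):

double v1 = 1000.0;
double v2 = 1120.0;
boolean significant = (v2 / v1 < 0.9) || (v1 / v2 < 0.9) || Math.abs(v1 - v2) > 10000.0;
// v1 / v2 is roughly 0.893 < 0.9, so significant == true. With v2 = 1100.0 the
// ratios are about 0.909 and 1.1 and the difference is 100 ms, so it would be
// false. At large magnitudes the absolute cap takes over: 100000 vs 110001
// passes both ratio checks but differs by 10001 ms, so it is significant.

The Double overload additionally treats (null, non-null) as significant and (null, null) as not, which matters because the per-bucket average is presumably undefined until at least one bucket has been counted.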
datafeedAggregations) { + private static boolean hasAggregations(ParsedRollupCaps rollupCaps, List> datafeedAggregations) { + for (ValuesSourceAggregationBuilder aggregationBuilder : datafeedAggregations) { String type = aggregationBuilder.getType(); String field = aggregationBuilder.field(); if (aggregationBuilder instanceof TermsAggregationBuilder) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedField.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedField.java index e297695152435..7dafbb5f4dcf0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedField.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedField.java @@ -9,7 +9,7 @@ import org.elasticsearch.geo.geometry.Geometry; import org.elasticsearch.geo.geometry.Point; import org.elasticsearch.geo.geometry.ShapeType; -import org.elasticsearch.geo.utils.GeographyValidator; +import org.elasticsearch.geo.utils.StandardValidator; import org.elasticsearch.geo.utils.WellKnownText; import org.elasticsearch.search.SearchHit; @@ -127,7 +127,7 @@ public boolean supportsFromSource() { } private static class GeoShapeField extends FromSource { - private static final WellKnownText wkt = new WellKnownText(true, new GeographyValidator(true)); + private static final WellKnownText wkt = new WellKnownText(true, new StandardValidator(true)); GeoShapeField(String alias, String name) { super(alias, name); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsIndex.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsIndex.java index 661525623575d..c19472e22a84e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsIndex.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsIndex.java @@ -168,6 +168,7 @@ private static void addMetaData(Map mappingsAsMap, String analyt metadata.put(ANALYTICS, analyticsId); } + @SuppressWarnings("unchecked") private static V getOrPutDefault(Map map, K key, Supplier valueSupplier) { V value = (V) map.get(key); if (value == null) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/DataFrameRowsJoiner.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/DataFrameRowsJoiner.java index 1b3dd2932ab9e..dbbb7f3cf2313 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/DataFrameRowsJoiner.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/DataFrameRowsJoiner.java @@ -104,7 +104,7 @@ private void checkChecksumsMatch(DataFrameDataExtractor.Row row, RowResults resu } private IndexRequest createIndexRequest(RowResults result, SearchHit hit) { - Map source = new LinkedHashMap(hit.getSourceAsMap()); + Map source = new LinkedHashMap<>(hit.getSourceAsMap()); source.putAll(result.getResults()); IndexRequest indexRequest = new IndexRequest(hit.getIndex()); indexRequest.id(hit.getId()); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/results/RowResults.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/results/RowResults.java index ba4aebededa2e..f32e13703212d 100644 --- 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/results/RowResults.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/results/RowResults.java @@ -20,6 +20,7 @@ public class RowResults implements ToXContentObject { public static final ParseField CHECKSUM = new ParseField("checksum"); public static final ParseField RESULTS = new ParseField("results"); + @SuppressWarnings("unchecked") public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(TYPE.getPreferredName(), a -> new RowResults((Integer) a[0], (Map) a[1])); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinder.java index 9dec0ddbf5ba0..08686ed26a31c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinder.java @@ -279,9 +279,10 @@ static Tuple overrideFormatToGrokAndRegex(String overrideFormat) } throw new IllegalArgumentException(msg); } - // No need to append to the Grok pattern as %{SECOND} already allows for an optional - // fraction, but we need to remove the separator that's included in %{SECOND} - grokPatternBuilder.deleteCharAt(grokPatternBuilder.length() - 1); + // No need to append to the Grok pattern as %{SECOND} already allows for an optional fraction, + // but we need to remove the separator that's included in %{SECOND} (and that might be escaped) + int numCharsToDelete = (PUNCTUATION_THAT_NEEDS_ESCAPING_IN_REGEX.indexOf(prevChar) >= 0) ? 2 : 1; + grokPatternBuilder.delete(grokPatternBuilder.length() - numCharsToDelete, grokPatternBuilder.length()); regexBuilder.append("\\d{").append(endPos - startPos).append('}'); } else { grokPatternBuilder.append(grokPatternAndRegexForGroup.v1()); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java index 7c2f15591b94f..683fbb7c65c17 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java @@ -45,6 +45,8 @@ import org.elasticsearch.xpack.core.ml.job.config.JobUpdate; import org.elasticsearch.xpack.core.ml.job.config.MlFilter; import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; +import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; @@ -256,7 +258,7 @@ public void putJob(PutJobAction.Request request, AnalysisRegistry analysisRegist ActionListener putJobListener = new ActionListener() { @Override - public void onResponse(Boolean indicesCreated) { + public void onResponse(Boolean mappingsUpdated) { jobConfigProvider.putJob(job, ActionListener.wrap( response -> { @@ -283,10 +285,23 @@ public void onFailure(Exception e) { } }; + ActionListener addDocMappingsListener = ActionListener.wrap( + indicesCreated -> { + if (state == null) { + logger.warn("Cannot update doc mapping because clusterState == null"); + 
putJobListener.onResponse(false); + return; + } + ElasticsearchMappings.addDocMappingIfMissing( + AnomalyDetectorsIndex.configIndexName(), ElasticsearchMappings::configMapping, client, state, putJobListener); + }, + putJobListener::onFailure + ); + ActionListener> checkForLeftOverDocs = ActionListener.wrap( matchedIds -> { if (matchedIds.isEmpty()) { - jobResultsProvider.createJobResultIndex(job, state, putJobListener); + jobResultsProvider.createJobResultIndex(job, state, addDocMappingsListener); } else { // A job has the same Id as one of the group names // error with the first in the list diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/MlMemoryTracker.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/MlMemoryTracker.java index afd670a180384..1aaaf1a65d2df 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/MlMemoryTracker.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/MlMemoryTracker.java @@ -299,7 +299,18 @@ void refresh(PersistentTasksCustomMetaData persistentTasks, ActionListener } fullRefreshCompletionListeners.clear(); } - }, onCompletion::onFailure); + }, + e -> { + synchronized (fullRefreshCompletionListeners) { + assert fullRefreshCompletionListeners.isEmpty() == false; + for (ActionListener listener : fullRefreshCompletionListeners) { + listener.onFailure(e); + } + // It's critical that we empty out the current listener list on + // error otherwise subsequent retries to refresh will be ignored + fullRefreshCompletionListeners.clear(); + } + }); // persistentTasks will be null if there's never been a persistent task created in this cluster if (persistentTasks == null) { @@ -424,6 +435,10 @@ private void setAnomalyDetectorJobMemoryToLimit(String jobId, ActionListener { if (e instanceof ResourceNotFoundException) { // TODO: does this also happen if the .ml-config index exists but is unavailable? + // However, note that we wait for the .ml-config index to be available earlier on in the + // job assignment process, so that scenario should be very rare, i.e. somebody has closed + // the .ml-config index (which would be unexpected and unsupported for an internal index) + // during the memory refresh. 
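The MlMemoryTracker hunk fixes a stuck-refresh bug: a non-empty fullRefreshCompletionListeners list is what marks a refresh as already in flight, so failing without draining it meant every later refresh attempt was treated as a duplicate and ignored. The notify-then-clear shape in isolation, with hypothetical field and method names:

private final List<ActionListener<Void>> pendingListeners = new ArrayList<>();

void onRefreshFailed(Exception e) {
    synchronized (pendingListeners) {
        for (ActionListener<Void> listener : pendingListeners) {
            listener.onFailure(e);
        }
        // Clearing is the critical part: leaving stale listeners behind would
        // make the tracker believe a refresh is still running forever.
        pendingListeners.clear();
    }
}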
logger.trace("[{}] anomaly detector job deleted during ML memory update", jobId); } else { logger.error("[" + jobId + "] failed to get anomaly detector job during ML memory update", e); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestDeleteJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestDeleteJobAction.java index 62494a03f9c52..812b70dc6717c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestDeleteJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestDeleteJobAction.java @@ -73,10 +73,10 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient // We do not want to log anything due to a delete action // The response or error will be returned to the client when called synchronously // or it will be stored in the task result when called asynchronously - private static TaskListener nullTaskListener() { - return new TaskListener() { + private static TaskListener nullTaskListener() { + return new TaskListener() { @Override - public void onResponse(Task task, Object o) {} + public void onResponse(Task task, T o) {} @Override public void onFailure(Task task, Throwable e) {} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningInfoTransportActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningInfoTransportActionTests.java index 29f348c3857b9..b3d4cc4f94d59 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningInfoTransportActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningInfoTransportActionTests.java @@ -338,7 +338,7 @@ private void givenJobs(List jobs, List(jobs, jobs.size(), Job.RESULTS_FIELD)); return Void.TYPE; - }).when(jobManager).expandJobs(eq(MetaData.ALL), eq(true), any(ActionListener.class)); + }).when(jobManager).expandJobs(eq(MetaData.ALL), eq(true), any()); doAnswer(invocationOnMock -> { @SuppressWarnings("unchecked") diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportCloseJobActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportCloseJobActionTests.java index 1091395ea7ae7..c4deb22e7ae85 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportCloseJobActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportCloseJobActionTests.java @@ -279,6 +279,7 @@ clusterService, mock(Client.class), mock(Auditor.class), mock(PersistentTasksSer jobConfigProvider, datafeedConfigProvider); } + @SuppressWarnings("unchecked") private void mockDatafeedConfigFindDatafeeds(Set datafeedIds) { doAnswer(invocation -> { ActionListener> listener = (ActionListener>) invocation.getArguments()[1]; @@ -288,6 +289,7 @@ private void mockDatafeedConfigFindDatafeeds(Set datafeedIds) { }).when(datafeedConfigProvider).findDatafeedsForJobIds(any(), any(ActionListener.class)); } + @SuppressWarnings("unchecked") private void mockJobConfigProviderExpandIds(Set expandedIds) { doAnswer(invocation -> { ActionListener> listener = (ActionListener>) invocation.getArguments()[3]; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportFinalizeJobExecutionActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportFinalizeJobExecutionActionTests.java index 11fdd9a2c09a4..010b5aa477032 100644 --- 
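Making nullTaskListener() generic lets each call site infer its own TaskListener<T> instead of sharing a raw TaskListener that forced unchecked handling downstream. Hypothetical call sites, just to show the inference:

import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.index.reindex.BulkByScrollResponse;
import org.elasticsearch.tasks.TaskListener;

TaskListener<AcknowledgedResponse> ackListener = nullTaskListener();   // T inferred as AcknowledgedResponse
TaskListener<BulkByScrollResponse> bulkListener = nullTaskListener();  // same method, different T, no casts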
a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportFinalizeJobExecutionActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportFinalizeJobExecutionActionTests.java @@ -42,7 +42,7 @@ public class TransportFinalizeJobExecutionActionTests extends ESTestCase { private Client client; @Before - @SuppressWarnings("unchecked") + @SuppressWarnings({"unchecked", "rawtypes"}) private void setupMocks() { ExecutorService executorService = mock(ExecutorService.class); threadPool = mock(ThreadPool.class); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java index cc9a0ba0181ad..b4bad2e894953 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java @@ -202,6 +202,7 @@ public static void addJobTask(String jobId, String nodeId, JobState jobState, Pe private void addIndices(MetaData.Builder metaData, RoutingTable.Builder routingTable) { List indices = new ArrayList<>(); + indices.add(AnomalyDetectorsIndex.configIndexName()); indices.add(AnomalyDetectorsIndexFields.STATE_INDEX_PREFIX); indices.add(MlMetaIndex.INDEX_NAME); indices.add(AuditorField.NOTIFICATIONS_INDEX); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedActionTests.java index b2f1107590712..21f8d2678f134 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedActionTests.java @@ -45,6 +45,7 @@ public class TransportPreviewDatafeedActionTests extends ESTestCase { private Exception capturedFailure; @Before + @SuppressWarnings("unchecked") public void setUpTests() { dataExtractor = mock(DataExtractor.class); actionListener = mock(ActionListener.class); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsActionTests.java new file mode 100644 index 0000000000000..a322b92deaa5e --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsActionTests.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.action; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.AliasMetaData; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.RecoverySource; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndexFields; + +import java.util.List; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.empty; + +public class TransportStartDataFrameAnalyticsActionTests extends ESTestCase { + + public void testVerifyIndicesPrimaryShardsAreActive() { + + // At present the only critical index is the config index + String indexName = AnomalyDetectorsIndex.configIndexName(); + + MetaData.Builder metaData = MetaData.builder(); + RoutingTable.Builder routingTable = RoutingTable.builder(); + + IndexMetaData.Builder indexMetaData = IndexMetaData.builder(indexName); + indexMetaData.settings(Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + ); + if (indexName.equals(AnomalyDetectorsIndexFields.STATE_INDEX_PREFIX)) { + indexMetaData.putAlias(new AliasMetaData.Builder(AnomalyDetectorsIndex.jobStateIndexWriteAlias())); + } + metaData.put(indexMetaData); + Index index = new Index(indexName, "_uuid"); + ShardId shardId = new ShardId(index, 0); + ShardRouting shardRouting = ShardRouting.newUnassigned(shardId, true, RecoverySource.EmptyStoreRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "")); + shardRouting = shardRouting.initialize("node_id", null, 0L); + shardRouting = shardRouting.moveToStarted(); + routingTable.add(IndexRoutingTable.builder(index) + .addIndexShard(new IndexShardRoutingTable.Builder(shardId).addShard(shardRouting).build())); + + ClusterState.Builder csBuilder = ClusterState.builder(new ClusterName("_name")); + csBuilder.routingTable(routingTable.build()); + csBuilder.metaData(metaData); + + ClusterState cs = csBuilder.build(); + assertThat(TransportStartDataFrameAnalyticsAction.verifyIndicesPrimaryShardsAreActive(cs, indexName), empty()); + + metaData = new MetaData.Builder(cs.metaData()); + routingTable = new RoutingTable.Builder(cs.routingTable()); + if (randomBoolean()) { + routingTable.remove(indexName); + } else { + index = new Index(indexName, "_uuid"); + shardId = new ShardId(index, 0); + shardRouting = ShardRouting.newUnassigned(shardId, true, RecoverySource.EmptyStoreRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "")); + shardRouting = shardRouting.initialize("node_id", null, 0L); + routingTable.add(IndexRoutingTable.builder(index) + .addIndexShard(new IndexShardRoutingTable.Builder(shardId).addShard(shardRouting).build())); + } + + 
csBuilder.routingTable(routingTable.build()); + csBuilder.metaData(metaData); + List result = TransportStartDataFrameAnalyticsAction.verifyIndicesPrimaryShardsAreActive(csBuilder.build(), indexName); + assertThat(result, contains(indexName)); + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilderTests.java index cd86241793240..609b0f8612efa 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilderTests.java @@ -54,6 +54,7 @@ public class DatafeedJobBuilderTests extends ESTestCase { private DatafeedJobBuilder datafeedJobBuilder; @Before + @SuppressWarnings("unchecked") public void init() { client = mock(Client.class); ThreadPool threadPool = mock(ThreadPool.class); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobTests.java index 8d8bd84a97c12..7b33a59d048de 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobTests.java @@ -73,10 +73,11 @@ public class DatafeedJobTests extends ESTestCase { private Auditor auditor; private DataExtractorFactory dataExtractorFactory; private DataExtractor dataExtractor; + private DatafeedTimingStatsReporter timingStatsReporter; private Client client; private DelayedDataDetector delayedDataDetector; private DataDescription.Builder dataDescription; - ActionFuture postDataFuture; + private ActionFuture postDataFuture; private ActionFuture flushJobFuture; private ActionFuture indexFuture; private ArgumentCaptor flushJobRequests; @@ -93,6 +94,7 @@ public void setup() throws Exception { dataExtractorFactory = mock(DataExtractorFactory.class); dataExtractor = mock(DataExtractor.class); when(dataExtractorFactory.newExtractor(anyLong(), anyLong())).thenReturn(dataExtractor); + timingStatsReporter = mock(DatafeedTimingStatsReporter.class); client = mock(Client.class); ThreadPool threadPool = mock(ThreadPool.class); when(client.threadPool()).thenReturn(threadPool); @@ -455,7 +457,7 @@ public void testFlushAnalysisProblemIsConflict() { private DatafeedJob createDatafeedJob(long frequencyMs, long queryDelayMs, long latestFinalBucketEndTimeMs, long latestRecordTimeMs) { Supplier currentTimeSupplier = () -> currentTime; - return new DatafeedJob(jobId, dataDescription.build(), frequencyMs, queryDelayMs, dataExtractorFactory, client, auditor, - currentTimeSupplier, delayedDataDetector, latestFinalBucketEndTimeMs, latestRecordTimeMs); + return new DatafeedJob(jobId, dataDescription.build(), frequencyMs, queryDelayMs, dataExtractorFactory, timingStatsReporter, + client, auditor, currentTimeSupplier, delayedDataDetector, latestFinalBucketEndTimeMs, latestRecordTimeMs); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java index 4f186164760aa..4ae2ba4d0953f 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java @@ -428,13 +428,13 @@ 
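The new test fabricates its cluster states by walking a shard through the routing lifecycle, which is the usual recipe for unit-testing code that inspects IndexRoutingTable. The essential steps, condensed from the test above (shardId as defined there):

// A primary begins unassigned, is initialized on a node, then started.
ShardRouting shard = ShardRouting.newUnassigned(shardId, true,
        RecoverySource.EmptyStoreRecoverySource.INSTANCE,
        new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, ""));
shard = shard.initialize("node_id", null, 0L); // allocated but not yet active
shard = shard.moveToStarted();                 // now allPrimaryShardsActive() can hold

Stopping after initialize(), or removing the index's routing table entry entirely, is how the test produces the unavailable branch that verifyIndicesPrimaryShardsAreActive must report.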
public static Job.Builder createDatafeedJob() { return builder; } + @SuppressWarnings({"rawtypes", "unchecked"}) private static DatafeedTask createDatafeedTask(String datafeedId, long startTime, Long endTime) { DatafeedTask task = mock(DatafeedTask.class); when(task.getDatafeedId()).thenReturn(datafeedId); when(task.getDatafeedStartTime()).thenReturn(startTime); when(task.getEndTime()).thenReturn(endTime); doAnswer(invocationOnMock -> { - @SuppressWarnings("rawtypes") ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; listener.onResponse(mock(PersistentTask.class)); return null; @@ -447,10 +447,10 @@ private Consumer mockConsumer() { return mock(Consumer.class); } + @SuppressWarnings({"rawtypes", "unchecked"}) private DatafeedTask spyDatafeedTask(DatafeedTask task) { task = spy(task); doAnswer(invocationOnMock -> { - @SuppressWarnings("rawtypes") ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; listener.onResponse(mock(PersistentTask.class)); return null; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedTimingStatsReporterTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedTimingStatsReporterTests.java index 9c86f05f2076e..e0aa9a696cd0e 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedTimingStatsReporterTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedTimingStatsReporterTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedTimingStats; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; import org.elasticsearch.xpack.ml.job.persistence.JobResultsPersister; import org.junit.Before; import org.mockito.InOrder; @@ -17,7 +18,9 @@ import static org.hamcrest.Matchers.is; import static org.mockito.Mockito.inOrder; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.verifyZeroInteractions; public class DatafeedTimingStatsReporterTests extends ESTestCase { @@ -31,59 +34,122 @@ public void setUpTests() { jobResultsPersister = mock(JobResultsPersister.class); } + public void testReportSearchDuration_Null() { + DatafeedTimingStatsReporter timingStatsReporter = + new DatafeedTimingStatsReporter(new DatafeedTimingStats(JOB_ID, 3, 10, 10000.0), jobResultsPersister); + assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 3, 10, 10000.0))); + + timingStatsReporter.reportSearchDuration(null); + assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 3, 10, 10000.0))); + + verifyZeroInteractions(jobResultsPersister); + } + + public void testReportSearchDuration_Zero() { + DatafeedTimingStatsReporter timingStatsReporter = + new DatafeedTimingStatsReporter(new DatafeedTimingStats(JOB_ID), jobResultsPersister); + assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 0, 0, 0.0))); + + timingStatsReporter.reportSearchDuration(TimeValue.ZERO); + assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 1, 0, 0.0))); + + verify(jobResultsPersister).persistDatafeedTimingStats(new DatafeedTimingStats(JOB_ID, 1, 0, 0.0), RefreshPolicy.IMMEDIATE); + 
verifyNoMoreInteractions(jobResultsPersister); + } + public void testReportSearchDuration() { DatafeedTimingStatsReporter timingStatsReporter = - new DatafeedTimingStatsReporter(new DatafeedTimingStats(JOB_ID, 3, 10000.0), jobResultsPersister); - assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 3, 10000.0))); + new DatafeedTimingStatsReporter(new DatafeedTimingStats(JOB_ID, 13, 10, 10000.0), jobResultsPersister); + assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 13, 10, 10000.0))); timingStatsReporter.reportSearchDuration(ONE_SECOND); - assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 4, 11000.0))); + assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 14, 10, 11000.0))); timingStatsReporter.reportSearchDuration(ONE_SECOND); - assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 5, 12000.0))); + assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 15, 10, 12000.0))); timingStatsReporter.reportSearchDuration(ONE_SECOND); - assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 6, 13000.0))); + assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 16, 10, 13000.0))); timingStatsReporter.reportSearchDuration(ONE_SECOND); - assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 7, 14000.0))); + assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 17, 10, 14000.0))); InOrder inOrder = inOrder(jobResultsPersister); inOrder.verify(jobResultsPersister).persistDatafeedTimingStats( - new DatafeedTimingStats(JOB_ID, 5, 12000.0), RefreshPolicy.IMMEDIATE); + new DatafeedTimingStats(JOB_ID, 15, 10, 12000.0), RefreshPolicy.IMMEDIATE); inOrder.verify(jobResultsPersister).persistDatafeedTimingStats( - new DatafeedTimingStats(JOB_ID, 7, 14000.0), RefreshPolicy.IMMEDIATE); + new DatafeedTimingStats(JOB_ID, 17, 10, 14000.0), RefreshPolicy.IMMEDIATE); + verifyNoMoreInteractions(jobResultsPersister); + } + + public void testReportDataCounts_Null() { + DatafeedTimingStatsReporter timingStatsReporter = + new DatafeedTimingStatsReporter(new DatafeedTimingStats(JOB_ID, 3, 10, 10000.0), jobResultsPersister); + assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 3, 10, 10000.0))); + + timingStatsReporter.reportDataCounts(null); + assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 3, 10, 10000.0))); + + verifyZeroInteractions(jobResultsPersister); + } + + public void testReportDataCounts() { + DataCounts dataCounts = new DataCounts(JOB_ID); + dataCounts.incrementBucketCount(20); + DatafeedTimingStatsReporter timingStatsReporter = + new DatafeedTimingStatsReporter(new DatafeedTimingStats(JOB_ID, 3, dataCounts.getBucketCount(), 10000.0), jobResultsPersister); + assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 3, 20, 10000.0))); + + dataCounts.incrementBucketCount(1); + timingStatsReporter.reportDataCounts(dataCounts); + assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 3, 21, 10000.0))); + + dataCounts.incrementBucketCount(1); + timingStatsReporter.reportDataCounts(dataCounts); + 
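The null-input tests pair an unchanged-state assertion with verifyZeroInteractions(jobResultsPersister), pinning down that a null search duration or null data counts returns before anything is accumulated or persisted. The Mockito idiom in isolation:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verifyZeroInteractions;

JobResultsPersister persister = mock(JobResultsPersister.class);
DatafeedTimingStatsReporter reporter =
        new DatafeedTimingStatsReporter(new DatafeedTimingStats("my-job"), persister);
reporter.reportSearchDuration(null);  // early return, stats untouched
verifyZeroInteractions(persister);    // and nothing persisted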
assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 3, 22, 10000.0))); + + dataCounts.incrementBucketCount(1); + timingStatsReporter.reportDataCounts(dataCounts); + assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 3, 23, 10000.0))); + + InOrder inOrder = inOrder(jobResultsPersister); + inOrder.verify(jobResultsPersister).persistDatafeedTimingStats( + new DatafeedTimingStats(JOB_ID, 3, 23, 10000.0), RefreshPolicy.IMMEDIATE); verifyNoMoreInteractions(jobResultsPersister); } public void testTimingStatsDifferSignificantly() { assertThat( DatafeedTimingStatsReporter.differSignificantly( - new DatafeedTimingStats(JOB_ID, 5, 1000.0), new DatafeedTimingStats(JOB_ID, 5, 1000.0)), + new DatafeedTimingStats(JOB_ID, 5, 10, 1000.0), new DatafeedTimingStats(JOB_ID, 5, 10, 1000.0)), is(false)); assertThat( DatafeedTimingStatsReporter.differSignificantly( - new DatafeedTimingStats(JOB_ID, 5, 1000.0), new DatafeedTimingStats(JOB_ID, 5, 1100.0)), + new DatafeedTimingStats(JOB_ID, 5, 10, 1000.0), new DatafeedTimingStats(JOB_ID, 5, 10, 1100.0)), is(false)); assertThat( DatafeedTimingStatsReporter.differSignificantly( - new DatafeedTimingStats(JOB_ID, 5, 1000.0), new DatafeedTimingStats(JOB_ID, 5, 1120.0)), + new DatafeedTimingStats(JOB_ID, 5, 10, 1000.0), new DatafeedTimingStats(JOB_ID, 5, 10, 1120.0)), is(true)); assertThat( DatafeedTimingStatsReporter.differSignificantly( - new DatafeedTimingStats(JOB_ID, 5, 10000.0), new DatafeedTimingStats(JOB_ID, 5, 11000.0)), + new DatafeedTimingStats(JOB_ID, 5, 10, 10000.0), new DatafeedTimingStats(JOB_ID, 5, 10, 11000.0)), is(false)); assertThat( DatafeedTimingStatsReporter.differSignificantly( - new DatafeedTimingStats(JOB_ID, 5, 10000.0), new DatafeedTimingStats(JOB_ID, 5, 11200.0)), + new DatafeedTimingStats(JOB_ID, 5, 10, 10000.0), new DatafeedTimingStats(JOB_ID, 5, 10, 11200.0)), is(true)); assertThat( DatafeedTimingStatsReporter.differSignificantly( - new DatafeedTimingStats(JOB_ID, 5, 100000.0), new DatafeedTimingStats(JOB_ID, 5, 110000.0)), + new DatafeedTimingStats(JOB_ID, 5, 10, 100000.0), new DatafeedTimingStats(JOB_ID, 5, 10, 110000.0)), is(false)); assertThat( DatafeedTimingStatsReporter.differSignificantly( - new DatafeedTimingStats(JOB_ID, 5, 100000.0), new DatafeedTimingStats(JOB_ID, 5, 110001.0)), + new DatafeedTimingStats(JOB_ID, 5, 10, 100000.0), new DatafeedTimingStats(JOB_ID, 5, 10, 110001.0)), + is(true)); + assertThat( + DatafeedTimingStatsReporter.differSignificantly( + new DatafeedTimingStats(JOB_ID, 5, 10, 100000.0), new DatafeedTimingStats(JOB_ID, 50, 10, 100000.0)), is(true)); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactoryTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactoryTests.java index 67e48c11fd1c4..3c071df8c3b6f 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactoryTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactoryTests.java @@ -72,6 +72,7 @@ protected NamedXContentRegistry xContentRegistry() { } @Before + @SuppressWarnings({"rawtypes", "unchecked"}) public void setUpTests() { client = mock(Client.class); timingStatsReporter = mock(DatafeedTimingStatsReporter.class); @@ -86,14 +87,12 @@ public void setUpTests() { when(getRollupIndexResponse.getJobs()).thenReturn(new HashMap<>()); doAnswer(invocationMock 
-> { - @SuppressWarnings("raw_types") ActionListener listener = (ActionListener) invocationMock.getArguments()[2]; listener.onResponse(fieldsCapabilities); return null; }).when(client).execute(same(FieldCapabilitiesAction.INSTANCE), any(), any()); doAnswer(invocationMock -> { - @SuppressWarnings("raw_types") ActionListener listener = (ActionListener) invocationMock.getArguments()[2]; listener.onResponse(getRollupIndexResponse); return null; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java index eb1542ae814df..f4074b0f5b46d 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java @@ -126,6 +126,7 @@ public Long getLastTimestamp() { } @Before + @SuppressWarnings("unchecked") public void setUpTests() { ThreadPool threadPool = mock(ThreadPool.class); when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorTests.java index 47c5aa26390a5..0d95d46804180 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorTests.java @@ -61,6 +61,7 @@ public class DataFrameDataExtractorTests extends ESTestCase { private ActionFuture clearScrollFuture; @Before + @SuppressWarnings("unchecked") public void setUpTests() { ThreadPool threadPool = mock(ThreadPool.class); when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/DataFrameRowsJoinerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/DataFrameRowsJoinerTests.java index fd2b396d62541..a67ff4ab3374f 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/DataFrameRowsJoinerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/DataFrameRowsJoinerTests.java @@ -67,7 +67,7 @@ public void testProcess_GivenSingleRowAndResult() throws IOException { String dataDoc = "{\"f_1\": \"foo\", \"f_2\": 42.0}"; String[] dataValues = {"42.0"}; DataFrameDataExtractor.Row row = newRow(newHit(dataDoc), dataValues, 1); - givenDataFrameBatches(Arrays.asList(row)); + givenDataFrameBatches(List.of(Arrays.asList(row))); Map resultFields = new HashMap<>(); resultFields.put("a", "1"); @@ -97,7 +97,7 @@ public void testProcess_GivenFullResultsBatch() throws IOException { IntStream.range(0, 1000).forEach(i -> firstBatch.add(newRow(newHit(dataDoc), dataValues, i))); List secondBatch = new ArrayList<>(1); secondBatch.add(newRow(newHit(dataDoc), dataValues, 1000)); - givenDataFrameBatches(firstBatch, secondBatch); + givenDataFrameBatches(List.of(firstBatch, secondBatch)); Map resultFields = new HashMap<>(); resultFields.put("a", "1"); @@ -118,7 +118,7 @@ public void testProcess_GivenSingleRowAndResultWithMismatchingIdHash() throws IO String dataDoc = "{\"f_1\": \"foo\", 
\"f_2\": 42.0}"; String[] dataValues = {"42.0"}; DataFrameDataExtractor.Row row = newRow(newHit(dataDoc), dataValues, 1); - givenDataFrameBatches(Arrays.asList(row)); + givenDataFrameBatches(List.of(Arrays.asList(row))); Map resultFields = new HashMap<>(); resultFields.put("a", "1"); @@ -136,7 +136,7 @@ public void testProcess_GivenSingleBatchWithSkippedRows() throws IOException { String dataDoc = "{\"f_1\": \"foo\", \"f_2\": 42.0}"; String[] dataValues = {"42.0"}; DataFrameDataExtractor.Row normalRow = newRow(newHit(dataDoc), dataValues, 2); - givenDataFrameBatches(Arrays.asList(skippedRow, normalRow)); + givenDataFrameBatches(List.of(Arrays.asList(skippedRow, normalRow))); Map resultFields = new HashMap<>(); resultFields.put("a", "1"); @@ -166,7 +166,7 @@ public void testProcess_GivenTwoBatchesWhereFirstEndsWithSkippedRow() throws IOE DataFrameDataExtractor.Row normalRow2 = newRow(newHit(dataDoc), dataValues, 2); DataFrameDataExtractor.Row skippedRow = newRow(newHit("{}"), null, 3); DataFrameDataExtractor.Row normalRow3 = newRow(newHit(dataDoc), dataValues, 4); - givenDataFrameBatches(Arrays.asList(normalRow1, normalRow2, skippedRow), Arrays.asList(normalRow3)); + givenDataFrameBatches(List.of(Arrays.asList(normalRow1, normalRow2, skippedRow), Arrays.asList(normalRow3))); Map resultFields = new HashMap<>(); resultFields.put("a", "1"); @@ -195,7 +195,7 @@ public void testProcess_GivenMoreResultsThanRows() throws IOException { String dataDoc = "{\"f_1\": \"foo\", \"f_2\": 42.0}"; String[] dataValues = {"42.0"}; DataFrameDataExtractor.Row row = newRow(newHit(dataDoc), dataValues, 1); - givenDataFrameBatches(Arrays.asList(row)); + givenDataFrameBatches(List.of(List.of(row))); Map resultFields = new HashMap<>(); resultFields.put("a", "1"); @@ -214,7 +214,7 @@ public void testProcess_GivenNoResults_ShouldCancelAndConsumeExtractor() throws String[] dataValues = {"42.0"}; DataFrameDataExtractor.Row row1 = newRow(newHit(dataDoc), dataValues, 1); DataFrameDataExtractor.Row row2 = newRow(newHit(dataDoc), dataValues, 1); - givenDataFrameBatches(Arrays.asList(row1), Arrays.asList(row2)); + givenDataFrameBatches(List.of(List.of(row1), List.of(row2))); givenProcessResults(Collections.emptyList()); @@ -229,8 +229,8 @@ private void givenProcessResults(List results) { } } - private void givenDataFrameBatches(List... 
batches) throws IOException { - DelegateStubDataExtractor delegateStubDataExtractor = new DelegateStubDataExtractor(Arrays.asList(batches)); + private void givenDataFrameBatches(List> batches) throws IOException { + DelegateStubDataExtractor delegateStubDataExtractor = new DelegateStubDataExtractor(batches); when(dataExtractor.hasNext()).thenAnswer(a -> delegateStubDataExtractor.hasNext()); when(dataExtractor.next()).thenAnswer(a -> delegateStubDataExtractor.next()); } @@ -254,6 +254,7 @@ private void givenClientHasNoFailures() { ThreadContext threadContext = new ThreadContext(Settings.EMPTY); ThreadPool threadPool = mock(ThreadPool.class); when(threadPool.getThreadContext()).thenReturn(threadContext); + @SuppressWarnings("unchecked") ActionFuture responseFuture = mock(ActionFuture.class); when(responseFuture.actionGet()).thenReturn(new BulkResponse(new BulkItemResponse[0], 0)); when(client.execute(same(BulkAction.INSTANCE), bulkRequestCaptor.capture())).thenReturn(responseFuture); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinderTests.java index 0b872e630b6ab..12fb14d642a55 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinderTests.java @@ -737,16 +737,27 @@ public void testCustomOverrideMatchingBuiltInFormat() { assertEquals(1, lenientTimestampFormatFinder.getNumMatchedFormats()); } - public void testCustomOverrideNotMatchingBuiltInFormat() { + public void testCustomOverridesNotMatchingBuiltInFormat() { - String overrideFormat = "MM/dd HH.mm.ss,SSSSSS 'in' yyyy"; - String text = "05/15 17.14.56,374946 in 2018"; - String expectedSimpleRegex = "\\b\\d{2}/\\d{2} \\d{2}\\.\\d{2}\\.\\d{2},\\d{6} in \\d{4}\\b"; - String expectedGrokPatternName = "CUSTOM_TIMESTAMP"; - Map expectedCustomGrokPatternDefinitions = + validateCustomOverrideNotMatchingBuiltInFormat("MM/dd HH.mm.ss,SSSSSS 'in' yyyy", "05/15 17.14.56,374946 in 2018", + "\\b\\d{2}/\\d{2} \\d{2}\\.\\d{2}\\.\\d{2},\\d{6} in \\d{4}\\b", "CUSTOM_TIMESTAMP", Collections.singletonMap(TimestampFormatFinder.CUSTOM_TIMESTAMP_GROK_NAME, - "%{MONTHNUM2}/%{MONTHDAY} %{HOUR}\\.%{MINUTE}\\.%{SECOND} in %{YEAR}"); + "%{MONTHNUM2}/%{MONTHDAY} %{HOUR}\\.%{MINUTE}\\.%{SECOND} in %{YEAR}")); + validateCustomOverrideNotMatchingBuiltInFormat("'some_prefix 'dd.MM.yyyy HH:mm:ss.SSSSSS", "some_prefix 06.01.2018 16:56:14.295748", + "some_prefix \\d{2}\\.\\d{2}\\.\\d{4} \\d{2}:\\d{2}:\\d{2}\\.\\d{6}\\b", "CUSTOM_TIMESTAMP", + Collections.singletonMap(TimestampFormatFinder.CUSTOM_TIMESTAMP_GROK_NAME, + "some_prefix %{MONTHDAY}\\.%{MONTHNUM2}\\.%{YEAR} %{HOUR}:%{MINUTE}:%{SECOND}")); + + validateCustomOverrideNotMatchingBuiltInFormat("dd.MM. yyyy HH:mm:ss.SSSSSS", "06.01. 2018 16:56:14.295748", + "\\b\\d{2}\\.\\d{2}\\. \\d{4} \\d{2}:\\d{2}:\\d{2}\\.\\d{6}\\b", "CUSTOM_TIMESTAMP", + Collections.singletonMap(TimestampFormatFinder.CUSTOM_TIMESTAMP_GROK_NAME, + "%{MONTHDAY}\\.%{MONTHNUM2}\\. 
%{YEAR} %{HOUR}:%{MINUTE}:%{SECOND}")); + } + + private void validateCustomOverrideNotMatchingBuiltInFormat(String overrideFormat, String text, String expectedSimpleRegex, + String expectedGrokPatternName, + Map expectedCustomGrokPatternDefinitions) { TimestampFormatFinder strictTimestampFormatFinder = new TimestampFormatFinder(explanation, overrideFormat, true, true, true, NOOP_TIMEOUT_CHECKER); strictTimestampFormatFinder.addSample(text); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/TooManyJobsIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/TooManyJobsIT.java index 95106385a9e22..7f5ae33bd3469 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/TooManyJobsIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/TooManyJobsIT.java @@ -58,7 +58,7 @@ public void testCloseFailedJob() throws Exception { PersistentTasksCustomMetaData tasks = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); assertEquals(1, tasks.taskMap().size()); // now just double check that the first job is still opened: - PersistentTasksCustomMetaData.PersistentTask task = tasks.getTask(MlTasks.jobTaskId("close-failed-job-1")); + PersistentTasksCustomMetaData.PersistentTask task = tasks.getTask(MlTasks.jobTaskId("close-failed-job-1")); assertEquals(JobState.OPENED, ((JobTaskState) task.getState()).getState()); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java index 5c8f9e3f51a68..126b33c3058f3 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java @@ -323,6 +323,7 @@ public void testNotifyFilterChangedGivenNoop() { Mockito.verifyNoMoreInteractions(auditor, updateJobProcessNotifier); } + @SuppressWarnings({"unchecked", "rawtypes"}) public void testNotifyFilterChanged() throws IOException { Detector.Builder detectorReferencingFilter = new Detector.Builder("count", null); detectorReferencingFilter.setByFieldName("foo"); @@ -531,7 +532,7 @@ public void testUpdateProcessOnCalendarChanged() { )); ArgumentCaptor updateParamsCaptor = ArgumentCaptor.forClass(UpdateParams.class); - verify(updateJobProcessNotifier, times(2)).submitJobUpdate(updateParamsCaptor.capture(), any(ActionListener.class)); + verify(updateJobProcessNotifier, times(2)).submitJobUpdate(updateParamsCaptor.capture(), any()); List capturedUpdateParams = updateParamsCaptor.getAllValues(); assertThat(capturedUpdateParams.size(), equalTo(2)); @@ -573,7 +574,7 @@ public void testUpdateProcessOnCalendarChanged_GivenGroups() throws IOException )); ArgumentCaptor updateParamsCaptor = ArgumentCaptor.forClass(UpdateParams.class); - verify(updateJobProcessNotifier, times(2)).submitJobUpdate(updateParamsCaptor.capture(), any(ActionListener.class)); + verify(updateJobProcessNotifier, times(2)).submitJobUpdate(updateParamsCaptor.capture(), any()); List capturedUpdateParams = updateParamsCaptor.getAllValues(); assertThat(capturedUpdateParams.size(), equalTo(2)); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/BatchedDocumentsIteratorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/BatchedDocumentsIteratorTests.java index 2ff21243979be..5b81002d9326c 100644 --- 
a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/BatchedDocumentsIteratorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/BatchedDocumentsIteratorTests.java @@ -162,7 +162,7 @@ ScrollResponsesMocker addBatch(String... hits) { return this; } - @SuppressWarnings("unchecked") + @SuppressWarnings({"unchecked", "rawtypes"}) void finishMock() { if (batches.isEmpty()) { givenInitialResponse(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobDataDeleterTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobDataDeleterTests.java index f02bc5bf9f1e7..2cc53aec2bc89 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobDataDeleterTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobDataDeleterTests.java @@ -48,7 +48,7 @@ public void testDeleteDatafeedTimingStats() { ArgumentCaptor deleteRequestCaptor = ArgumentCaptor.forClass(DeleteByQueryRequest.class); verify(client).threadPool(); - verify(client).execute(eq(DeleteByQueryAction.INSTANCE), deleteRequestCaptor.capture(), any(ActionListener.class)); + verify(client).execute(eq(DeleteByQueryAction.INSTANCE), deleteRequestCaptor.capture(), any()); verifyNoMoreInteractions(client); DeleteByQueryRequest deleteRequest = deleteRequestCaptor.getValue(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersisterTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersisterTests.java index dea47ef1b9d9b..da69ef3760a6f 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersisterTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersisterTests.java @@ -232,6 +232,7 @@ public void testPersistTimingStats() { verifyNoMoreInteractions(client); } + @SuppressWarnings({"unchecked", "rawtypes"}) public void testPersistDatafeedTimingStats() { Client client = mockClient(ArgumentCaptor.forClass(BulkRequest.class)); doAnswer( @@ -239,13 +240,13 @@ public void testPersistDatafeedTimingStats() { // Take the listener passed to client::index as 2nd argument ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; // Handle the response on the listener - listener.onResponse(new IndexResponse()); + listener.onResponse(new IndexResponse(null, null, null, 0, 0, 0, false)); return null; }) .when(client).index(any(), any(ActionListener.class)); JobResultsPersister persister = new JobResultsPersister(client); - DatafeedTimingStats timingStats = new DatafeedTimingStats("foo", 6, 666.0); + DatafeedTimingStats timingStats = new DatafeedTimingStats("foo", 6, 66, 666.0); persister.persistDatafeedTimingStats(timingStats, WriteRequest.RefreshPolicy.IMMEDIATE); ArgumentCaptor indexRequestCaptor = ArgumentCaptor.forClass(IndexRequest.class); @@ -260,6 +261,7 @@ public void testPersistDatafeedTimingStats() { Map.of( "job_id", "foo", "search_count", 6, + "bucket_count", 66, "total_search_time_ms", 666.0))); verify(client, times(1)).threadPool(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProviderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProviderTests.java index aec8b72e8cb72..cf005d3c3da59 100644 --- 
a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProviderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProviderTests.java @@ -263,7 +263,7 @@ public void testBuckets_OneBucketNoInterim() throws IOException { BucketsQueryBuilder bq = new BucketsQueryBuilder().from(from).size(size).anomalyScoreThreshold(1.0); - @SuppressWarnings({"unchecked"}) + @SuppressWarnings({"unchecked", "rawtypes"}) QueryPage[] holder = new QueryPage[1]; provider.buckets(jobId, bq, r -> holder[0] = r, e -> {throw new RuntimeException(e);}, client); QueryPage buckets = holder[0]; @@ -297,7 +297,7 @@ public void testBuckets_OneBucketInterim() throws IOException { BucketsQueryBuilder bq = new BucketsQueryBuilder().from(from).size(size).anomalyScoreThreshold(5.1) .includeInterim(true); - @SuppressWarnings({"unchecked"}) + @SuppressWarnings({"unchecked", "rawtypes"}) QueryPage[] holder = new QueryPage[1]; provider.buckets(jobId, bq, r -> holder[0] = r, e -> {throw new RuntimeException(e);}, client); QueryPage buckets = holder[0]; @@ -333,7 +333,7 @@ public void testBuckets_UsingBuilder() throws IOException { bq.anomalyScoreThreshold(5.1); bq.includeInterim(true); - @SuppressWarnings({"unchecked"}) + @SuppressWarnings({"unchecked", "rawtypes"}) QueryPage[] holder = new QueryPage[1]; provider.buckets(jobId, bq, r -> holder[0] = r, e -> {throw new RuntimeException(e);}, client); QueryPage buckets = holder[0]; @@ -379,7 +379,7 @@ public void testBucket_OneBucketNoExpand() throws IOException { BucketsQueryBuilder bq = new BucketsQueryBuilder(); bq.timestamp(Long.toString(now.getTime())); - @SuppressWarnings({"unchecked"}) + @SuppressWarnings({"unchecked", "rawtypes"}) QueryPage[] bucketHolder = new QueryPage[1]; provider.buckets(jobId, bq, q -> bucketHolder[0] = q, e -> {}, client); assertThat(bucketHolder[0].count(), equalTo(1L)); @@ -420,7 +420,7 @@ public void testRecords() throws IOException { .epochEnd(String.valueOf(now.getTime())).includeInterim(true).sortField(sortfield) .recordScore(2.2); - @SuppressWarnings({"unchecked"}) + @SuppressWarnings({"unchecked", "rawtypes"}) QueryPage[] holder = new QueryPage[1]; provider.records(jobId, rqb, page -> holder[0] = page, RuntimeException::new, client); QueryPage recordPage = holder[0]; @@ -473,7 +473,7 @@ public void testRecords_UsingBuilder() throws IOException { rqb.sortField(sortfield); rqb.recordScore(2.2); - @SuppressWarnings({"unchecked"}) + @SuppressWarnings({"unchecked", "rawtypes"}) QueryPage[] holder = new QueryPage[1]; provider.records(jobId, rqb, page -> holder[0] = page, RuntimeException::new, client); QueryPage recordPage = holder[0]; @@ -518,7 +518,7 @@ public void testBucketRecords() throws IOException { Client client = getMockedClient(qb -> {}, response); JobResultsProvider provider = createProvider(client); - @SuppressWarnings({"unchecked"}) + @SuppressWarnings({"unchecked", "rawtypes"}) QueryPage[] holder = new QueryPage[1]; provider.bucketRecords(jobId, bucket, from, size, true, sortfield, true, page -> holder[0] = page, RuntimeException::new, client); @@ -579,7 +579,7 @@ public void testCategoryDefinitions() throws IOException { Client client = getMockedClient(q -> {}, response); JobResultsProvider provider = createProvider(client); - @SuppressWarnings({"unchecked"}) + @SuppressWarnings({"unchecked", "rawtypes"}) QueryPage[] holder = new QueryPage[1]; provider.categoryDefinitions(jobId, null, false, from, size, r -> holder[0] = r, e -> {throw new 
RuntimeException(e);}, client); @@ -601,7 +601,7 @@ public void testCategoryDefinition() throws IOException { SearchResponse response = createSearchResponse(Collections.singletonList(source)); Client client = getMockedClient(q -> {}, response); JobResultsProvider provider = createProvider(client); - @SuppressWarnings({"unchecked"}) + @SuppressWarnings({"unchecked", "rawtypes"}) QueryPage[] holder = new QueryPage[1]; provider.categoryDefinitions(jobId, categoryId, false, null, null, r -> holder[0] = r, e -> {throw new RuntimeException(e);}, client); @@ -643,7 +643,7 @@ public void testInfluencers_NoInterim() throws IOException { Client client = getMockedClient(q -> qbHolder[0] = q, response); JobResultsProvider provider = createProvider(client); - @SuppressWarnings({"unchecked"}) + @SuppressWarnings({"unchecked", "rawtypes"}) QueryPage[] holder = new QueryPage[1]; InfluencersQuery query = new InfluencersQueryBuilder().from(from).size(size).includeInterim(false).build(); provider.influencers(jobId, query, page -> holder[0] = page, RuntimeException::new, client); @@ -703,7 +703,7 @@ public void testInfluencers_WithInterim() throws IOException { Client client = getMockedClient(q -> qbHolder[0] = q, response); JobResultsProvider provider = createProvider(client); - @SuppressWarnings({"unchecked"}) + @SuppressWarnings({"unchecked", "rawtypes"}) QueryPage[] holder = new QueryPage[1]; InfluencersQuery query = new InfluencersQueryBuilder().from(from).size(size).start("0").end("0").sortField("sort") .sortDescending(true).influencerScoreThreshold(0.0).includeInterim(true).build(); @@ -758,7 +758,7 @@ public void testModelSnapshots() throws IOException { Client client = getMockedClient(qb -> {}, response); JobResultsProvider provider = createProvider(client); - @SuppressWarnings({"unchecked"}) + @SuppressWarnings({"unchecked", "rawtypes"}) QueryPage[] holder = new QueryPage[1]; provider.modelSnapshots(jobId, from, size, r -> holder[0] = r, RuntimeException::new); QueryPage page = holder[0]; @@ -860,7 +860,7 @@ public void testTimingStats_Ok() throws IOException { verify(client).prepareSearch(indexName); verify(client).threadPool(); - verify(client).search(any(SearchRequest.class), any(ActionListener.class)); + verify(client).search(any(SearchRequest.class), any()); verifyNoMoreInteractions(client); } @@ -881,7 +881,7 @@ public void testTimingStats_NotFound() throws IOException { verify(client).prepareSearch(indexName); verify(client).threadPool(); - verify(client).search(any(SearchRequest.class), any(ActionListener.class)); + verify(client).search(any(SearchRequest.class), any()); verifyNoMoreInteractions(client); } @@ -903,12 +903,14 @@ public void testDatafeedTimingStats_MultipleDocumentsAtOnce() throws IOException Map.of( Job.ID.getPreferredName(), "foo", DatafeedTimingStats.SEARCH_COUNT.getPreferredName(), 6, + DatafeedTimingStats.BUCKET_COUNT.getPreferredName(), 66, DatafeedTimingStats.TOTAL_SEARCH_TIME_MS.getPreferredName(), 666.0)); List> sourceBar = Arrays.asList( Map.of( Job.ID.getPreferredName(), "bar", DatafeedTimingStats.SEARCH_COUNT.getPreferredName(), 7, + DatafeedTimingStats.BUCKET_COUNT.getPreferredName(), 77, DatafeedTimingStats.TOTAL_SEARCH_TIME_MS.getPreferredName(), 777.0)); SearchResponse responseFoo = createSearchResponse(sourceFoo); SearchResponse responseBar = createSearchResponse(sourceBar); @@ -943,12 +945,15 @@ public void testDatafeedTimingStats_MultipleDocumentsAtOnce() throws IOException statsByJobId -> assertThat( statsByJobId, - equalTo(Map.of("foo", new 
DatafeedTimingStats("foo", 6, 666.0), "bar", new DatafeedTimingStats("bar", 7, 777.0)))), + equalTo( + Map.of( + "foo", new DatafeedTimingStats("foo", 6, 66, 666.0), + "bar", new DatafeedTimingStats("bar", 7, 77, 777.0)))), e -> { throw new AssertionError(); }); verify(client).threadPool(); verify(client).prepareMultiSearch(); - verify(client).multiSearch(any(MultiSearchRequest.class), any(ActionListener.class)); + verify(client).multiSearch(any(MultiSearchRequest.class), any()); verify(client).prepareSearch(AnomalyDetectorsIndex.jobResultsAliasedName("foo")); verify(client).prepareSearch(AnomalyDetectorsIndex.jobResultsAliasedName("bar")); verifyNoMoreInteractions(client); @@ -961,6 +966,7 @@ public void testDatafeedTimingStats_Ok() throws IOException { Map.of( Job.ID.getPreferredName(), "foo", DatafeedTimingStats.SEARCH_COUNT.getPreferredName(), 6, + DatafeedTimingStats.BUCKET_COUNT.getPreferredName(), 66, DatafeedTimingStats.TOTAL_SEARCH_TIME_MS.getPreferredName(), 666.0)); SearchResponse response = createSearchResponse(source); Client client = getMockedClient( @@ -971,12 +977,12 @@ public void testDatafeedTimingStats_Ok() throws IOException { JobResultsProvider provider = createProvider(client); provider.datafeedTimingStats( "foo", - stats -> assertThat(stats, equalTo(new DatafeedTimingStats("foo", 6, 666.0))), + stats -> assertThat(stats, equalTo(new DatafeedTimingStats("foo", 6, 66, 666.0))), e -> { throw new AssertionError(); }); verify(client).prepareSearch(indexName); verify(client).threadPool(); - verify(client).search(any(SearchRequest.class), any(ActionListener.class)); + verify(client).search(any(SearchRequest.class), any()); verifyNoMoreInteractions(client); } @@ -997,7 +1003,7 @@ public void testDatafeedTimingStats_NotFound() throws IOException { verify(client).prepareSearch(indexName); verify(client).threadPool(); - verify(client).search(any(SearchRequest.class), any(ActionListener.class)); + verify(client).search(any(SearchRequest.class), any()); verifyNoMoreInteractions(client); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java index b64bc42cb2c63..b325d3b33ca61 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java @@ -151,6 +151,7 @@ public MockClientBuilder prepareGet(String index, String type, String id, GetRes return this; } + @SuppressWarnings("unchecked") public MockClientBuilder get(GetResponse response) { doAnswer(new Answer() { @Override @@ -382,6 +383,7 @@ public MockClientBuilder bulk(BulkResponse response) { return this; } + @SuppressWarnings("unchecked") public MockClientBuilder preparePutMapping(AcknowledgedResponse response, String type) { PutMappingRequestBuilder requestBuilder = mock(PutMappingRequestBuilder.class); when(requestBuilder.setType(eq(type))).thenReturn(requestBuilder); @@ -400,6 +402,7 @@ public Void answer(InvocationOnMock invocationOnMock) throws Throwable { return this; } + @SuppressWarnings("unchecked") public MockClientBuilder prepareGetMapping(GetMappingsResponse response) { GetMappingsRequestBuilder builder = mock(GetMappingsRequestBuilder.class); @@ -417,6 +420,7 @@ public Void answer(InvocationOnMock invocationOnMock) throws Throwable { return this; } + @SuppressWarnings("unchecked") public MockClientBuilder 
putTemplate(ArgumentCaptor requestCaptor) { doAnswer(new Answer() { @Override diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessTests.java index 0935a4c628d6b..0d3932ece29f1 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessTests.java @@ -53,6 +53,7 @@ public void initialize() { when(executorService.submit(any(Runnable.class))).thenReturn(mock(Future.class)); } + @SuppressWarnings("unchecked") public void testProcessStartTime() throws Exception { InputStream logStream = mock(InputStream.class); when(logStream.read(new byte[1024])).thenReturn(-1); @@ -74,6 +75,7 @@ public void testProcessStartTime() throws Exception { } } + @SuppressWarnings("unchecked") public void testWriteRecord() throws IOException { InputStream logStream = mock(InputStream.class); when(logStream.read(new byte[1024])).thenReturn(-1); @@ -109,6 +111,7 @@ bos, outputStream, mock(OutputStream.class), NUMBER_FIELDS, Collections.emptyLis } } + @SuppressWarnings("unchecked") public void testFlush() throws IOException { InputStream logStream = mock(InputStream.class); when(logStream.read(new byte[1024])).thenReturn(-1); @@ -141,6 +144,7 @@ public void testPersistJob() throws IOException { testWriteMessage(p -> p.persistState(), AutodetectControlMsgWriter.BACKGROUND_PERSIST_MESSAGE_CODE); } + @SuppressWarnings("unchecked") public void testConsumeAndCloseOutputStream() throws IOException { InputStream logStream = mock(InputStream.class); when(logStream.read(new byte[1024])).thenReturn(-1); @@ -157,6 +161,7 @@ processInStream, processOutStream, mock(OutputStream.class), NUMBER_FIELDS, Coll } } + @SuppressWarnings("unchecked") private void testWriteMessage(CheckedConsumer writeFunction, String expectedMessageCode) throws IOException { InputStream logStream = mock(InputStream.class); when(logStream.read(new byte[1024])).thenReturn(-1); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/CsvDataToProcessWriterTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/CsvDataToProcessWriterTests.java index 6e846b000237d..cf65eec4f04df 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/CsvDataToProcessWriterTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/CsvDataToProcessWriterTests.java @@ -265,6 +265,7 @@ public void testWrite_NullByte() throws IOException { verify(dataCountsReporter).finishReporting(any()); } + @SuppressWarnings("unchecked") public void testWrite_EmptyInput() throws IOException { AnalysisConfig.Builder builder = new AnalysisConfig.Builder(Collections.singletonList(new Detector.Builder("metric", "value").build())); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemoverTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemoverTests.java index 7cb5b6ea8ca01..53242f07677a8 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemoverTests.java +++ 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemoverTests.java @@ -86,6 +86,7 @@ private static SearchResponse createSearchResponse(List to return searchResponse; } + @SuppressWarnings("unchecked") public void testRemoveGivenNoJobs() throws IOException { SearchResponse response = createSearchResponse(Collections.emptyList()); @@ -102,7 +103,7 @@ public void testRemoveGivenNoJobs() throws IOException { assertEquals(remover.getRetentionDaysCallCount, 0); } - + @SuppressWarnings("unchecked") public void testRemoveGivenMulipleBatches() throws IOException { // This is testing AbstractExpiredJobDataRemover.WrappedBatchedJobsIterator int totalHits = 7; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemoverTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemoverTests.java index 3a3775844d1dd..977828ac21075 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemoverTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemoverTests.java @@ -192,6 +192,7 @@ public void testRemove_GivenClientDeleteSnapshotRequestsFail() throws IOExceptio assertThat(deleteSnapshotRequest.getSnapshotId(), equalTo("snapshots-1_1")); } + @SuppressWarnings("unchecked") private void givenJobs(List jobs) throws IOException { SearchResponse response = AbstractExpiredJobDataRemoverTests.createSearchResponse(jobs); @@ -234,6 +235,7 @@ private void givenClientDeleteModelSnapshotRequestsFail() { givenClientRequests(true, false); } + @SuppressWarnings("unchecked") private void givenClientRequests(boolean shouldSearchRequestsSucceed, boolean shouldDeleteSnapshotRequestsSucceed) { doAnswer(new Answer() { int callCount = 0; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemoverTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemoverTests.java index 0398882a8e35e..5bca7b36436ad 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemoverTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemoverTests.java @@ -46,6 +46,7 @@ public class ExpiredResultsRemoverTests extends ESTestCase { private ActionListener listener; @Before + @SuppressWarnings("unchecked") public void setUpTests() { capturedDeleteByQueryRequests = new ArrayList<>(); client = mock(Client.class); @@ -132,6 +133,7 @@ private void givenClientRequestsFailed() { givenClientRequests(false); } + @SuppressWarnings("unchecked") private void givenClientRequests(boolean shouldSucceed) { doAnswer(new Answer() { @Override @@ -151,6 +153,7 @@ public Void answer(InvocationOnMock invocationOnMock) throws Throwable { }).when(client).execute(same(DeleteByQueryAction.INSTANCE), any(), any()); } + @SuppressWarnings("unchecked") private void givenJobs(List jobs) throws IOException { SearchResponse response = AbstractExpiredJobDataRemoverTests.createSearchResponse(jobs); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/MlMemoryTrackerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/MlMemoryTrackerTests.java index 429575902b0b8..ed17be1f3dcdd 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/MlMemoryTrackerTests.java +++ 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/MlMemoryTrackerTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.OpenJobAction; import org.elasticsearch.xpack.core.ml.action.StartDataFrameAnalyticsAction; +import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; import org.elasticsearch.xpack.core.ml.job.config.AnalysisLimits; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.ml.dataframe.persistence.DataFrameAnalyticsConfigProvider; @@ -32,11 +33,13 @@ import java.util.List; import java.util.Map; import java.util.concurrent.ExecutorService; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import static org.hamcrest.CoreMatchers.instanceOf; import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyBoolean; import static org.mockito.Matchers.eq; import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.doAnswer; @@ -110,7 +113,7 @@ public void testRefreshAll() { Consumer listener = (Consumer) invocation.getArguments()[3]; listener.accept(randomLongBetween(1000, 1000000)); return null; - }).when(jobResultsProvider).getEstablishedMemoryUsage(anyString(), any(), any(), any(Consumer.class), any()); + }).when(jobResultsProvider).getEstablishedMemoryUsage(anyString(), any(), any(), any(), any()); memoryTracker.refresh(persistentTasks, ActionListener.wrap(aVoid -> {}, ESTestCase::assertNull)); @@ -119,12 +122,72 @@ public void testRefreshAll() { String jobId = "job" + i; verify(jobResultsProvider, times(1)).getEstablishedMemoryUsage(eq(jobId), any(), any(), any(), any()); } - verify(configProvider, times(1)).getMultiple(eq(String.join(",", allIds)), eq(false), any(ActionListener.class)); + verify(configProvider, times(1)).getMultiple(eq(String.join(",", allIds)), eq(false), any()); } else { verify(jobResultsProvider, never()).getEstablishedMemoryUsage(anyString(), any(), any(), any(), any()); } } + public void testRefreshAllFailure() { + + Map> tasks = new HashMap<>(); + + int numAnomalyDetectorJobTasks = randomIntBetween(2, 5); + for (int i = 1; i <= numAnomalyDetectorJobTasks; ++i) { + String jobId = "job" + i; + PersistentTasksCustomMetaData.PersistentTask task = makeTestAnomalyDetectorTask(jobId); + tasks.put(task.getId(), task); + } + + int numDataFrameAnalyticsTasks = randomIntBetween(2, 5); + for (int i = 1; i <= numDataFrameAnalyticsTasks; ++i) { + String id = "analytics" + i; + PersistentTasksCustomMetaData.PersistentTask task = makeTestDataFrameAnalyticsTask(id); + tasks.put(task.getId(), task); + } + + PersistentTasksCustomMetaData persistentTasks = + new PersistentTasksCustomMetaData(numAnomalyDetectorJobTasks + numDataFrameAnalyticsTasks, tasks); + + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + Consumer listener = (Consumer) invocation.getArguments()[3]; + listener.accept(randomLongBetween(1000, 1000000)); + return null; + }).when(jobResultsProvider).getEstablishedMemoryUsage(anyString(), any(), any(), any(), any()); + + // First run a refresh using a component that calls the onFailure method of the listener + + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + ActionListener> listener = + (ActionListener>) invocation.getArguments()[2]; + listener.onFailure(new IllegalArgumentException("computer says no")); + return null; + }).when(configProvider).getMultiple(anyString(), anyBoolean(), 
any()); + + AtomicBoolean gotErrorResponse = new AtomicBoolean(false); + memoryTracker.refresh(persistentTasks, + ActionListener.wrap(aVoid -> fail("Expected error response"), e -> gotErrorResponse.set(true))); + assertTrue(gotErrorResponse.get()); + + // Now run another refresh using a component that calls the onResponse method of the listener - this + // proves that the ML memory tracker has not been permanently blocked up by the previous failure + + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + ActionListener> listener = + (ActionListener>) invocation.getArguments()[2]; + listener.onResponse(Collections.emptyList()); + return null; + }).when(configProvider).getMultiple(anyString(), anyBoolean(), any()); + + AtomicBoolean gotSuccessResponse = new AtomicBoolean(false); + memoryTracker.refresh(persistentTasks, + ActionListener.wrap(aVoid -> gotSuccessResponse.set(true), e -> fail("Expected success response"))); + assertTrue(gotSuccessResponse.get()); + } + public void testRefreshOneAnomalyDetectorJob() { boolean isMaster = randomBoolean(); @@ -143,7 +206,7 @@ public void testRefreshOneAnomalyDetectorJob() { Consumer listener = (Consumer) invocation.getArguments()[3]; listener.accept(haveEstablishedModelMemory ? modelBytes : 0L); return null; - }).when(jobResultsProvider).getEstablishedMemoryUsage(eq(jobId), any(), any(), any(Consumer.class), any()); + }).when(jobResultsProvider).getEstablishedMemoryUsage(eq(jobId), any(), any(), any(), any()); boolean simulateVeryOldJob = randomBoolean(); long recentJobModelMemoryLimitMb = 2; @@ -154,7 +217,7 @@ public void testRefreshOneAnomalyDetectorJob() { ActionListener listener = (ActionListener) invocation.getArguments()[1]; listener.onResponse(job); return null; - }).when(jobManager).getJob(eq(jobId), any(ActionListener.class)); + }).when(jobManager).getJob(eq(jobId), any()); AtomicReference refreshedMemoryRequirement = new AtomicReference<>(); memoryTracker.refreshAnomalyDetectorJobMemory(jobId, ActionListener.wrap(refreshedMemoryRequirement::set, ESTestCase::assertNull)); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/LocalStateMonitoring.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/LocalStateMonitoring.java index 3ab34652e229e..16122b2aa9e20 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/LocalStateMonitoring.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/LocalStateMonitoring.java @@ -22,7 +22,7 @@ import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; import org.elasticsearch.xpack.core.action.XPackUsageResponse; import org.elasticsearch.xpack.core.ssl.SSLService; -import org.elasticsearch.xpack.indexlifecycle.IndexLifecycle; +import org.elasticsearch.xpack.ilm.IndexLifecycle; import org.elasticsearch.xpack.watcher.Watcher; import java.nio.file.Path; diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkDocTests.java index dc294ef53de52..3bcfc009f96ca 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkDocTests.java @@ -147,7 +147,7 @@ public void testSerialization() throws IOException { final int iterations = randomIntBetween(5, 50); for 
(int i = 0; i < iterations; i++) { final MonitoringBulkDoc original = randomMonitoringBulkDoc(random()); - final MonitoringBulkDoc deserialized = copyWriteable(original, registry, MonitoringBulkDoc::readFrom); + final MonitoringBulkDoc deserialized = copyWriteable(original, registry, MonitoringBulkDoc::new); assertEquals(original, deserialized); assertEquals(original.hashCode(), deserialized.hashCode()); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkRequestTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkRequestTests.java index 5113371f2b338..f1d03bf5ff767 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkRequestTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkRequestTests.java @@ -240,8 +240,7 @@ public void testSerialization() throws IOException { final StreamInput in = out.bytes().streamInput(); in.setVersion(out.getVersion()); - final MonitoringBulkRequest deserializedRequest = new MonitoringBulkRequest(); - deserializedRequest.readFrom(in); + final MonitoringBulkRequest deserializedRequest = new MonitoringBulkRequest(in); assertThat(in.available(), equalTo(0)); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkResponseTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkResponseTests.java index 901025ff2c444..daae3ca53bc96 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkResponseTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkResponseTests.java @@ -70,9 +70,7 @@ public void testSerialization() throws IOException { StreamInput streamInput = output.bytes().streamInput(); streamInput.setVersion(version); - MonitoringBulkResponse response2 = new MonitoringBulkResponse(); - response2.readFrom(streamInput); - + MonitoringBulkResponse response2 = new MonitoringBulkResponse(streamInput); assertThat(response2.getTookInMillis(), equalTo(response.getTookInMillis())); if (response.getError() == null) { assertThat(response2.getError(), is(nullValue())); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsMonitoringDocTests.java index dd64e0d2ded95..ebfbf8d223db9 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsMonitoringDocTests.java @@ -175,6 +175,7 @@ public void testToXContent() throws IOException { + "\"timing_stats\":{" + "\"job_id\":\"_job_id\"," + "\"bucket_count\":100," + + "\"total_bucket_processing_time_ms\":2000.0," + "\"minimum_bucket_processing_time_ms\":10.0," + "\"maximum_bucket_processing_time_ms\":30.0," + "\"average_bucket_processing_time_ms\":20.0," diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java index aa58b9fa60660..ecb0d637f14fb 100644 --- 
a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java @@ -635,7 +635,8 @@ private MonitoringDoc newRandomMonitoringDoc() { long intervalMillis = randomNonNegativeLong(); MonitoringDoc.Node sourceNode = MonitoringTestUtils.randomMonitoringNode(random()); - return new IndexRecoveryMonitoringDoc(clusterUUID, timestamp, intervalMillis, sourceNode, new RecoveryResponse()); + return new IndexRecoveryMonitoringDoc(clusterUUID, timestamp, intervalMillis, sourceNode, + new RecoveryResponse(0, 0, 0, null, null)); } private List newRandomMonitoringDocs(int nb) { diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterSslIT.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterSslIT.java index 19a981939c480..3aa07a58a7666 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterSslIT.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterSslIT.java @@ -204,7 +204,7 @@ private static List getProtocols() { } else { JavaVersion full = AccessController.doPrivileged( - (PrivilegedAction) () -> JavaVersion.parse(System.getProperty("java.specification.version"))); + (PrivilegedAction) () -> JavaVersion.parse(System.getProperty("java.version"))); if (full.compareTo(JavaVersion.parse("12.0.1")) < 0) { return List.of("TLSv1.2"); } diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportGetRollupCapsAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportGetRollupCapsAction.java index 4a1ee1d1471cc..7457ea3095cd3 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportGetRollupCapsAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportGetRollupCapsAction.java @@ -28,7 +28,6 @@ import java.util.Map; import java.util.Optional; import java.util.TreeMap; -import java.util.function.Supplier; import java.util.stream.Collectors; public class TransportGetRollupCapsAction extends HandledTransportAction { @@ -37,8 +36,7 @@ public class TransportGetRollupCapsAction extends HandledTransportAction) GetRollupCapsAction.Request::new); + super(GetRollupCapsAction.NAME, transportService, actionFilters, GetRollupCapsAction.Request::new); this.clusterService = clusterService; } diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportGetRollupIndexCapsAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportGetRollupIndexCapsAction.java index dd25dff489840..7c8df1fbb47d0 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportGetRollupIndexCapsAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportGetRollupIndexCapsAction.java @@ -24,7 +24,6 @@ import java.util.List; import java.util.Map; import java.util.TreeMap; -import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.StreamSupport; @@ -36,8 +35,7 @@ public class TransportGetRollupIndexCapsAction extends HandledTransportAction) GetRollupIndexCapsAction.Request::new); + super(GetRollupIndexCapsAction.NAME, transportService, actionFilters, 
GetRollupIndexCapsAction.Request::new); this.clusterService = clusterService; } diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java index 571d37e9652ba..ddd40ad12ba7f 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java @@ -72,7 +72,7 @@ public TransportPutRollupJobAction(TransportService transportService, ThreadPool ClusterService clusterService, XPackLicenseState licenseState, PersistentTasksService persistentTasksService, Client client) { super(PutRollupJobAction.NAME, transportService, clusterService, threadPool, actionFilters, - indexNameExpressionResolver, PutRollupJobAction.Request::new); + PutRollupJobAction.Request::new, indexNameExpressionResolver); this.licenseState = licenseState; this.persistentTasksService = persistentTasksService; this.client = client; @@ -88,11 +88,6 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { return new AcknowledgedResponse(in); } - @Override - protected AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected void masterOperation(Task task, PutRollupJobAction.Request request, ClusterState clusterState, ActionListener listener) { diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java index 0421f72aaf5b8..cd7ad4fdf07fc 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java @@ -33,6 +33,7 @@ import org.elasticsearch.xpack.core.rollup.job.RollupJob; import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; import org.elasticsearch.xpack.core.rollup.job.RollupJobStatus; +import org.elasticsearch.xpack.core.scheduler.CronSchedule; import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; import org.elasticsearch.xpack.rollup.Rollup; diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/GetRollupCapsActionRequestTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/GetRollupCapsActionRequestTests.java index 09fd039533591..3fc67d8ae9ae7 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/GetRollupCapsActionRequestTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/GetRollupCapsActionRequestTests.java @@ -10,7 +10,8 @@ import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.action.GetRollupCapsAction; @@ -27,7 +28,7 @@ import static org.hamcrest.Matchers.equalTo; -public class GetRollupCapsActionRequestTests extends 
AbstractStreamableTestCase { +public class GetRollupCapsActionRequestTests extends AbstractWireSerializingTestCase { @Override protected GetRollupCapsAction.Request createTestInstance() { @@ -38,8 +39,8 @@ protected GetRollupCapsAction.Request createTestInstance() { } @Override - protected GetRollupCapsAction.Request createBlankInstance() { - return new GetRollupCapsAction.Request(); + protected Writeable.Reader instanceReader() { + return GetRollupCapsAction.Request::new; } public void testNoIndexMetaData() { diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/GetRollupIndexCapsActionRequestTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/GetRollupIndexCapsActionRequestTests.java index e9d5d6153b18c..9878d46044855 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/GetRollupIndexCapsActionRequestTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/GetRollupIndexCapsActionRequestTests.java @@ -10,7 +10,8 @@ import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.action.GetRollupIndexCapsAction; @@ -27,7 +28,7 @@ import static org.hamcrest.Matchers.equalTo; -public class GetRollupIndexCapsActionRequestTests extends AbstractStreamableTestCase { +public class GetRollupIndexCapsActionRequestTests extends AbstractWireSerializingTestCase { @Override protected GetRollupIndexCapsAction.Request createTestInstance() { @@ -38,11 +39,10 @@ protected GetRollupIndexCapsAction.Request createTestInstance() { } @Override - protected GetRollupIndexCapsAction.Request createBlankInstance() { - return new GetRollupIndexCapsAction.Request(); + protected Writeable.Reader instanceReader() { + return GetRollupIndexCapsAction.Request::new; } - public void testNoIndicesByRollup() { ImmutableOpenMap indices = new ImmutableOpenMap.Builder().build(); Map caps = getCapsByRollupIndex(Collections.singletonList("foo"), indices); diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobActionRequestTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobActionRequestTests.java index 2765c9ca8c1d2..5b0e20232b81f 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobActionRequestTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobActionRequestTests.java @@ -6,15 +6,16 @@ package org.elasticsearch.xpack.rollup.action; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; import org.elasticsearch.xpack.core.rollup.action.PutRollupJobAction.Request; import org.junit.Before; import java.io.IOException; -public class PutJobActionRequestTests extends AbstractStreamableXContentTestCase { +public class PutJobActionRequestTests extends 
AbstractSerializingTestCase { private String jobId; @@ -29,13 +30,13 @@ protected Request createTestInstance() { } @Override - protected boolean supportsUnknownFields() { - return false; + protected Writeable.Reader instanceReader() { + return Request::new; } @Override - protected Request createBlankInstance() { - return new Request(); + protected boolean supportsUnknownFields() { + return false; } @Override diff --git a/x-pack/plugin/security/qa/basic-enable-security/build.gradle b/x-pack/plugin/security/qa/basic-enable-security/build.gradle index 94bfb73c07679..461bc11a9b47a 100644 --- a/x-pack/plugin/security/qa/basic-enable-security/build.gradle +++ b/x-pack/plugin/security/qa/basic-enable-security/build.gradle @@ -16,7 +16,7 @@ integTest { } testClusters.integTest { - distribution = 'DEFAULT' + testDistribution = 'DEFAULT' numberOfNodes = 2 setting 'xpack.ilm.enabled', 'false' setting 'xpack.ml.enabled', 'false' diff --git a/x-pack/plugin/security/qa/security-basic/build.gradle b/x-pack/plugin/security/qa/security-basic/build.gradle index 4dc948e477ed2..4fa19511797ef 100644 --- a/x-pack/plugin/security/qa/security-basic/build.gradle +++ b/x-pack/plugin/security/qa/security-basic/build.gradle @@ -9,7 +9,7 @@ dependencies { } testClusters.integTest { - distribution = "DEFAULT" + testDistribution = 'DEFAULT' numberOfNodes = 2 setting 'xpack.ilm.enabled', 'false' diff --git a/x-pack/plugin/security/qa/tls-basic/build.gradle b/x-pack/plugin/security/qa/tls-basic/build.gradle index a8e9e16aeda1b..f7bcadc1fccc9 100644 --- a/x-pack/plugin/security/qa/tls-basic/build.gradle +++ b/x-pack/plugin/security/qa/tls-basic/build.gradle @@ -18,7 +18,7 @@ forbiddenPatterns { testClusters.integTest { - distribution = "DEFAULT" + testDistribution = 'DEFAULT' numberOfNodes = 2 extraConfigFile 'http.key', file('src/test/resources/ssl/http.key') diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/realm/TransportClearRealmCacheAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/realm/TransportClearRealmCacheAction.java index 63eb54a0b4ddd..8acc6631920fc 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/realm/TransportClearRealmCacheAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/realm/TransportClearRealmCacheAction.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.support.nodes.TransportNodesAction; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -22,6 +23,7 @@ import org.elasticsearch.xpack.security.authc.Realms; import org.elasticsearch.xpack.security.authc.support.CachingRealm; +import java.io.IOException; import java.util.List; public class TransportClearRealmCacheAction extends TransportNodesAction { private final SecurityContext securityContext; @Inject public TransportAuthenticateAction(TransportService transportService, ActionFilters actionFilters, SecurityContext securityContext) { - super(AuthenticateAction.NAME, transportService, actionFilters, (Supplier) AuthenticateRequest::new); + super(AuthenticateAction.NAME, transportService, actionFilters, AuthenticateRequest::new); this.securityContext = securityContext; } diff --git 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportDeleteUserAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportDeleteUserAction.java index 12e840b4f89f2..74024c3074afb 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportDeleteUserAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportDeleteUserAction.java @@ -21,8 +21,6 @@ import org.elasticsearch.xpack.core.security.user.XPackUser; import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore; -import java.util.function.Supplier; - public class TransportDeleteUserAction extends HandledTransportAction { private final Settings settings; @@ -31,7 +29,7 @@ public class TransportDeleteUserAction extends HandledTransportAction) DeleteUserRequest::new); + super(DeleteUserAction.NAME, transportService, actionFilters, DeleteUserRequest::new); this.settings = settings; this.usersStore = usersStore; } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java index 39b981b42e310..6961613fe31ee 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java @@ -135,7 +135,7 @@ public List getUnlicensedRealms() { final List allowedRealms = this.asList(); // Shortcut for the typical case, all the configured realms are allowed - if (allowedRealms.equals(this.realms.size())) { + if (allowedRealms.equals(this.realms)) { return Collections.emptyList(); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/PkiRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/PkiRealm.java index 92a521789ec86..40d44503aef22 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/PkiRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/PkiRealm.java @@ -110,7 +110,30 @@ public boolean supports(AuthenticationToken token) { @Override public X509AuthenticationToken token(ThreadContext context) { - return token(context.getTransient(PKI_CERT_HEADER_NAME), principalPattern, logger); + Object pkiHeaderValue = context.getTransient(PKI_CERT_HEADER_NAME); + if (pkiHeaderValue == null) { + return null; + } + assert pkiHeaderValue instanceof X509Certificate[]; + X509Certificate[] certificates = (X509Certificate[]) pkiHeaderValue; + if (certificates.length == 0) { + return null; + } + X509AuthenticationToken token = new X509AuthenticationToken(certificates); + // the following block of code maintains BWC: + // When constructing the token object we only return it if the Subject DN of the certificate can be parsed by at least one PKI + // realm. We then consider the parsed Subject DN as the "principal" even though it is potentially incorrect because when several + // realms are installed the one that first parses the principal might not be the one that finally authenticates (does trusted chain + // validation). In this case the principal should be set by the realm that completes the authentication. 
But in the common case, + // where a single PKI realm is configured, there is no risk of eagerly parsing the principal before authentication and it also + // maintains BWC. + String parsedPrincipal = getPrincipalFromSubjectDN(principalPattern, token, logger); + if (parsedPrincipal == null) { + return null; + } + token.setPrincipal(parsedPrincipal); + // end BWC code block + return token; } @Override @@ -122,25 +145,41 @@ public void authenticate(AuthenticationToken authToken, ActionListener cachingListener = ActionListener.wrap(result -> { - if (result.isAuthenticated()) { - try (ReleasableLock ignored = readLock.acquire()) { - cache.put(fingerprint, result.getUser()); + // parse the principal again after validating the cert chain, and do not rely on the token.principal one, because that could + // be set by a different realm that failed trusted chain validation. We SHOULD NOT parse the principal BEFORE this step, but + // we do it for BWC purposes. Changing this is a breaking change. + final String principal = getPrincipalFromSubjectDN(principalPattern, token, logger); + if (principal == null) { + logger.debug((Supplier) () -> new ParameterizedMessage( + "the extracted principal after cert chain validation, from DN [{}], using pattern [{}] is null", token.dn(), + principalPattern.toString())); + listener.onResponse(AuthenticationResult.unsuccessful("Could not parse principal from Subject DN " + token.dn(), null)); + } else { + final ActionListener cachingListener = ActionListener.wrap(result -> { + if (result.isAuthenticated()) { + try (ReleasableLock ignored = readLock.acquire()) { + cache.put(fingerprint, result.getUser()); + } } + listener.onResponse(result); + }, listener::onFailure); + if (false == principal.equals(token.principal())) { + logger.debug((Supplier) () -> new ParameterizedMessage( + "the extracted principal before [{}] and after [{}] cert chain validation, for DN [{}], are different", + token.principal(), principal, token.dn())); + } + if (delegatedRealms.hasDelegation()) { + delegatedRealms.resolve(principal, cachingListener); + } else { + buildUser(token, principal, cachingListener); } - listener.onResponse(result); - }, listener::onFailure); - if (delegatedRealms.hasDelegation()) { - delegatedRealms.resolve(token.principal(), cachingListener); - } else { - this.buildUser(token, cachingListener); } } } catch (CertificateEncodingException e) { @@ -148,12 +187,11 @@ public void authenticate(AuthenticationToken authToken, ActionListener listener) { + private void buildUser(X509AuthenticationToken token, String principal, ActionListener listener) { final Map metadata = Map.of("pki_dn", token.dn()); - final UserRoleMapper.UserData userData = new UserRoleMapper.UserData(token.principal(), token.dn(), Set.of(), metadata, config); + final UserRoleMapper.UserData userData = new UserRoleMapper.UserData(principal, token.dn(), Set.of(), metadata, config); roleMapper.resolveRoles(userData, ActionListener.wrap(roles -> { - final User computedUser = - new User(token.principal(), roles.toArray(new String[roles.size()]), null, null, metadata, true); + final User computedUser = new User(principal, roles.toArray(new String[roles.size()]), null, null, metadata, true); listener.onResponse(AuthenticationResult.success(computedUser)); }, listener::onFailure)); } @@ -163,47 +201,33 @@ public void lookupUser(String username, ActionListener listener) { listener.onResponse(null); } - static X509AuthenticationToken token(Object pkiHeaderValue, Pattern principalPattern, Logger logger) { - if 
(pkiHeaderValue == null) { - return null; - } - - assert pkiHeaderValue instanceof X509Certificate[]; - X509Certificate[] certificates = (X509Certificate[]) pkiHeaderValue; - if (certificates.length == 0) { - return null; - } - - String dn = certificates[0].getSubjectX500Principal().toString(); + static String getPrincipalFromSubjectDN(Pattern principalPattern, X509AuthenticationToken token, Logger logger) { + String dn = token.credentials()[0].getSubjectX500Principal().toString(); Matcher matcher = principalPattern.matcher(dn); - if (!matcher.find()) { - if (logger.isDebugEnabled()) { - logger.debug("certificate authentication succeeded for [{}] but could not extract principal from DN", dn); - } + if (false == matcher.find()) { + logger.debug((Supplier) () -> new ParameterizedMessage("could not extract principal from DN [{}] using pattern [{}]", dn, + principalPattern.toString())); return null; } - String principal = matcher.group(1); if (Strings.isNullOrEmpty(principal)) { - if (logger.isDebugEnabled()) { - logger.debug("certificate authentication succeeded for [{}] but extracted principal was empty", dn); - } + logger.debug((Supplier) () -> new ParameterizedMessage("the extracted principal from DN [{}] using pattern [{}] is empty", + dn, principalPattern.toString())); return null; } - return new X509AuthenticationToken(certificates, principal, dn); + return principal; } - static boolean isCertificateChainTrusted(X509TrustManager trustManager, X509AuthenticationToken token, Logger logger) { + private static boolean isCertificateChainTrusted(X509TrustManager trustManager, X509AuthenticationToken token, Logger logger) { if (trustManager != null) { try { trustManager.checkClientTrusted(token.credentials(), AUTH_TYPE); return true; } catch (CertificateException e) { if (logger.isTraceEnabled()) { - logger.trace((Supplier) - () -> new ParameterizedMessage("failed certificate validation for principal [{}]", token.principal()), e); + logger.trace("failed certificate validation for Subject DN [" + token.dn() + "]", e); } else if (logger.isDebugEnabled()) { - logger.debug("failed certificate validation for principal [{}]", token.principal()); + logger.debug("failed certificate validation for Subject DN [{}]", token.dn()); } } return false; @@ -213,7 +237,7 @@ static boolean isCertificateChainTrusted(X509TrustManager trustManager, X509Auth return true; } - X509TrustManager trustManagers(RealmConfig realmConfig) { + private X509TrustManager trustManagers(RealmConfig realmConfig) { final List certificateAuthorities = realmConfig.hasSetting(PkiRealmSettings.CAPATH_SETTING) ? 
realmConfig.getSetting(PkiRealmSettings.CAPATH_SETTING) : null; String truststorePath = realmConfig.getSetting(PkiRealmSettings.TRUST_STORE_PATH).orElse(null); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/X509AuthenticationToken.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/X509AuthenticationToken.java index 8603a662efa4c..30722dbb8a446 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/X509AuthenticationToken.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/X509AuthenticationToken.java @@ -8,17 +8,18 @@ import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; import java.security.cert.X509Certificate; +import java.util.Objects; public class X509AuthenticationToken implements AuthenticationToken { - private final String principal; private final String dn; - private X509Certificate[] credentials; + private final X509Certificate[] credentials; + private String principal; - public X509AuthenticationToken(X509Certificate[] certificates, String principal, String dn) { - this.principal = principal; - this.credentials = certificates; - this.dn = dn; + public X509AuthenticationToken(X509Certificate[] certificates) { + this.credentials = Objects.requireNonNull(certificates); + this.dn = certificates.length == 0 ? "" : certificates[0].getSubjectX500Principal().toString(); + this.principal = this.dn; } @Override @@ -26,6 +27,10 @@ public String principal() { return principal; } + public void setPrincipal(String principal) { + this.principal = principal; + } + @Override public X509Certificate[] credentials() { return credentials; @@ -37,6 +42,6 @@ public String dn() { @Override public void clearCredentials() { - credentials = null; + // noop } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/NioIPFilter.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/NioIPFilter.java index 12f6b67d67242..b90a6f4991cf4 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/NioIPFilter.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/NioIPFilter.java @@ -28,9 +28,9 @@ public final class NioIPFilter extends DelegatingHandler { } @Override - public void channelRegistered() { + public void channelActive() { if (filter.accept(profile, remoteAddress)) { - super.channelRegistered(); + super.channelActive(); } else { denied = true; } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLChannelContext.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLChannelContext.java index 8947447ef58d6..6a1684dd02485 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLChannelContext.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLChannelContext.java @@ -55,8 +55,8 @@ public final class SSLChannelContext extends SocketChannelContext { } @Override - public void register() throws IOException { - super.register(); + protected void channelActive() throws IOException { + super.channelActive(); sslDriver.init(); SSLOutboundBuffer outboundBuffer = sslDriver.getOutboundBuffer(); if (outboundBuffer.hasEncryptedBytesToFlush()) { @@ -179,8 +179,15 @@ public boolean selectorShouldClose() { @Override public 
void closeChannel() { if (isClosing.compareAndSet(false, true)) { - WriteOperation writeOperation = new CloseNotifyOperation(this); - getSelector().queueWrite(writeOperation); + // The model for closing channels will change at some point, removing the need for this "schedule + // a write" signal. But for now, we need to handle the edge case where the channel is not + // registered. + if (getSelectionKey() == null) { + getSelector().queueChannelClose(channel); + } else { + WriteOperation writeOperation = new CloseNotifyOperation(this); + getSelector().queueWrite(writeOperation); + } } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioTransport.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioTransport.java index 8b1a40aa266f0..2837475c62559 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioTransport.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioTransport.java @@ -129,6 +129,11 @@ protected Function clientChannelFactoryFunctio }; } + @Override + public boolean isSecure() { + return this.sslEnabled; + } + private class SecurityTcpChannelFactory extends TcpChannelFactory { private final String profileName; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityContextTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityContextTests.java index 8fa280b68de37..fd0c4d8f45900 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityContextTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityContextTests.java @@ -15,13 +15,18 @@ import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.Authentication.AuthenticationType; import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; +import org.elasticsearch.xpack.core.security.authc.AuthenticationField; import org.elasticsearch.xpack.core.security.user.SystemUser; import org.elasticsearch.xpack.core.security.user.User; import org.junit.Before; +import java.io.EOFException; import java.io.IOException; +import java.io.UncheckedIOException; import java.util.concurrent.atomic.AtomicReference; +import static org.hamcrest.Matchers.instanceOf; + public class SecurityContextTests extends ESTestCase { private Settings settings; @@ -51,6 +56,14 @@ public void testGetAuthenticationAndUser() throws IOException { assertEquals(user, securityContext.getUser()); } + public void testGetAuthenticationDoesNotSwallowIOException() { + threadContext.putHeader(AuthenticationField.AUTHENTICATION_KEY, ""); // an intentionally corrupt header + final SecurityContext securityContext = new SecurityContext(Settings.EMPTY, threadContext); + final UncheckedIOException e = expectThrows(UncheckedIOException.class, securityContext::getAuthentication); + assertNotNull(e.getCause()); + assertThat(e.getCause(), instanceOf(EOFException.class)); + } + public void testSetUser() { final User user = new User("test"); assertNull(securityContext.getAuthentication()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/SamlPrepareAuthenticationRequestTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/SamlPrepareAuthenticationRequestTests.java index 
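/*
 * [illustrative aside] Why the corrupt-header test above expects
 * UncheckedIOException(EOFException): the authentication header is an empty string, so
 * decoding it hits end-of-stream on the very first read, and getAuthentication() now
 * wraps and rethrows that IOException instead of swallowing it. A simplified,
 * self-contained sketch of the wrapping contract (readFirstByte is a made-up stand-in
 * for the real header decoder):
 */
import java.io.ByteArrayInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.UncheckedIOException;

final class HeaderDecodingSketch {
    static int readFirstByte(byte[] headerBytes) {
        try (ByteArrayInputStream in = new ByteArrayInputStream(headerBytes)) {
            int b = in.read();
            if (b == -1) {
                throw new EOFException("empty authentication header");
            }
            return b;
        } catch (IOException e) {
            // surface the corruption to the caller instead of returning null
            throw new UncheckedIOException(e);
        }
    }

    public static void main(String[] args) {
        try {
            readFirstByte(new byte[0]);
        } catch (UncheckedIOException e) {
            assert e.getCause() instanceof EOFException;
        }
    }
}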
e257c37cbd377..c610215152035 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/SamlPrepareAuthenticationRequestTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/SamlPrepareAuthenticationRequestTests.java @@ -5,13 +5,13 @@ */ package org.elasticsearch.xpack.security.action.saml; -import java.io.IOException; - import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.xpack.core.security.action.saml.SamlPrepareAuthenticationRequest; import org.elasticsearch.xpack.security.authc.saml.SamlTestCase; import org.hamcrest.Matchers; +import java.io.IOException; + public class SamlPrepareAuthenticationRequestTests extends SamlTestCase { public void testSerialiseNonNullCriteria() throws IOException { @@ -32,8 +32,7 @@ private void serialiseAndValidate(SamlPrepareAuthenticationRequest req1) throws final BytesStreamOutput out = new BytesStreamOutput(); req1.writeTo(out); - final SamlPrepareAuthenticationRequest req2 = new SamlPrepareAuthenticationRequest(); - req2.readFrom(out.bytes().streamInput()); + final SamlPrepareAuthenticationRequest req2 = new SamlPrepareAuthenticationRequest(out.bytes().streamInput()); assertThat(req2.getRealmName(), Matchers.equalTo(req1.getRealmName())); assertThat(req2.getAssertionConsumerServiceURL(), Matchers.equalTo(req1.getAssertionConsumerServiceURL())); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java index a26e05e523459..0cee62879fbd0 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java @@ -48,6 +48,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.iterableWithSize; import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.sameInstance; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -110,6 +111,7 @@ public void testWithSettings() throws Exception { } assertThat(realms.getUnlicensedRealms(), empty()); + assertThat(realms.getUnlicensedRealms(), sameInstance(realms.getUnlicensedRealms())); } public void testWithSettingsWhereDifferentRealmsHaveSameOrder() throws Exception { @@ -150,6 +152,7 @@ public void testWithSettingsWhereDifferentRealmsHaveSameOrder() throws Exception } assertThat(realms.getUnlicensedRealms(), empty()); + assertThat(realms.getUnlicensedRealms(), sameInstance(realms.getUnlicensedRealms())); } public void testWithSettingsWithMultipleInternalRealmsOfSameType() throws Exception { @@ -185,6 +188,7 @@ public void testWithEmptySettings() throws Exception { assertThat(iter.hasNext(), is(false)); assertThat(realms.getUnlicensedRealms(), empty()); + assertThat(realms.getUnlicensedRealms(), sameInstance(realms.getUnlicensedRealms())); } public void testUnlicensedWithOnlyCustomRealms() throws Exception { @@ -220,6 +224,7 @@ public void testUnlicensedWithOnlyCustomRealms() throws Exception { } assertThat(realms.getUnlicensedRealms(), empty()); + assertThat(realms.getUnlicensedRealms(), sameInstance(realms.getUnlicensedRealms())); when(licenseState.allowedRealmType()).thenReturn(AllowedRealmType.DEFAULT); @@ -303,6 +308,7 @@ public void testUnlicensedWithInternalRealms() throws Exception { } assertThat(types, contains("ldap", "type_0")); 
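/*
 * [illustrative aside] The sameInstance assertions being added throughout this test tie
 * back to the Realms fix earlier in this diff: the old shortcut compared a List against
 * an int (allowedRealms.equals(this.realms.size())), which is always false because
 * List.equals only returns true for another equal List, so getUnlicensedRealms() never
 * took the fast path. With the corrected comparison, the method returns
 * Collections.emptyList() whenever every configured realm is licensed, so repeated
 * calls return the same shared empty instance, which sameInstance() can observe:
 */
import java.util.Collections;
import java.util.List;

final class EqualsShortcutSketch {
    public static void main(String[] args) {
        List<String> allowed = List.of("file", "native");
        List<String> configured = List.of("file", "native");
        assert !allowed.equals(configured.size()); // List vs autoboxed Integer: always false
        assert allowed.equals(configured);         // the intended comparison
        // the fast path hands back the shared immutable empty list
        assert Collections.emptyList() == Collections.emptyList();
    }
}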
assertThat(realms.getUnlicensedRealms(), empty()); + assertThat(realms.getUnlicensedRealms(), sameInstance(realms.getUnlicensedRealms())); when(licenseState.allowedRealmType()).thenReturn(AllowedRealmType.DEFAULT); iter = realms.iterator(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiAuthenticationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiAuthenticationTests.java index 5b2ab36426363..1e5b188160ce2 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiAuthenticationTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiAuthenticationTests.java @@ -57,9 +57,16 @@ protected Settings nodeSettings() { .put("xpack.security.authc.realms.file.file.order", "0") .put("xpack.security.authc.realms.pki.pki1.order", "1") .putList("xpack.security.authc.realms.pki.pki1.certificate_authorities", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.crt").toString()) + .put("xpack.security.authc.realms.pki.pki1.files.role_mapping", getDataPath("role_mapping.yml")) + .put("xpack.security.authc.realms.pki.pki1.files.role_mapping", getDataPath("role_mapping.yml")) + // pki1 never authenticates because of the principal pattern + .put("xpack.security.authc.realms.pki.pki1.username_pattern", "CN=(MISMATCH.*?)(?:,|$)") + .put("xpack.security.authc.realms.pki.pki2.order", "2") + .putList("xpack.security.authc.realms.pki.pki2.certificate_authorities", getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt").toString(), getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_ec.crt").toString()) - .put("xpack.security.authc.realms.pki.pki1.files.role_mapping", getDataPath("role_mapping.yml")); + .put("xpack.security.authc.realms.pki.pki2.files.role_mapping", getDataPath("role_mapping.yml")); return builder.build(); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiRealmTests.java index 2d46d96a914b7..e5eb265979a87 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiRealmTests.java @@ -79,7 +79,7 @@ public void testTokenSupport() { assertThat(realm.supports(null), is(false)); assertThat(realm.supports(new UsernamePasswordToken("", new SecureString(new char[0]))), is(false)); - assertThat(realm.supports(new X509AuthenticationToken(new X509Certificate[0], "", "")), is(true)); + assertThat(realm.supports(new X509AuthenticationToken(new X509Certificate[0])), is(true)); } public void testExtractToken() throws Exception { @@ -92,7 +92,6 @@ public void testExtractToken() throws Exception { X509AuthenticationToken token = realm.token(threadContext); assertThat(token, is(notNullValue())); assertThat(token.dn(), is("CN=Elasticsearch Test Node, OU=elasticsearch, O=org")); - assertThat(token.principal(), is("Elasticsearch Test Node")); } public void testAuthenticateBasedOnCertToken() throws Exception { @@ -112,7 +111,8 @@ private void assertSuccessfulAuthentication(Set roles) throws Exception PkiRealm realm = buildRealm(roleMapper, globalSettings); verify(roleMapper).refreshRealmOnChange(realm); - final String expectedUsername = token.principal(); + final 
String expectedUsername = PkiRealm.getPrincipalFromSubjectDN(Pattern.compile(PkiRealmSettings.DEFAULT_USERNAME_PATTERN), + token, NoOpLogger.INSTANCE); final AuthenticationResult result = authenticate(token, realm); final PlainActionFuture future; assertThat(result.getStatus(), is(AuthenticationResult.Status.SUCCESS)); @@ -133,10 +133,9 @@ private void assertSuccessfulAuthentication(Set roles) throws Exception realm.expire(expectedUsername); } } - future = new PlainActionFuture<>(); - realm.authenticate(token, future); - assertEquals(AuthenticationResult.Status.SUCCESS, future.actionGet().getStatus()); - assertEquals(user, future.actionGet().getUser()); + final AuthenticationResult result2 = authenticate(token, realm); + assertThat(AuthenticationResult.Status.SUCCESS, is(result2.getStatus())); + assertThat(user, is(result2.getUser())); } final int numTimes = invalidate ? 2 : 1; @@ -144,6 +143,16 @@ private void assertSuccessfulAuthentication(Set roles) throws Exception verifyNoMoreInteractions(roleMapper); } + private UserRoleMapper buildRoleMapper() { + UserRoleMapper roleMapper = mock(UserRoleMapper.class); + Mockito.doAnswer(invocation -> { + ActionListener> listener = (ActionListener>) invocation.getArguments()[1]; + listener.onResponse(Collections.emptySet()); + return null; + }).when(roleMapper).resolveRoles(any(UserRoleMapper.UserData.class), any(ActionListener.class)); + return roleMapper; + } + private UserRoleMapper buildRoleMapper(Set roles, String dn) { UserRoleMapper roleMapper = mock(UserRoleMapper.class); Mockito.doAnswer(invocation -> { @@ -172,7 +181,7 @@ private PkiRealm buildRealm(UserRoleMapper roleMapper, Settings settings, Realm. private X509AuthenticationToken buildToken() throws Exception { X509Certificate certificate = readCert(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); - return new X509AuthenticationToken(new X509Certificate[]{certificate}, "Elasticsearch Test Node", "CN=Elasticsearch Test Node,"); + return new X509AuthenticationToken(new X509Certificate[]{certificate}); } private AuthenticationResult authenticate(X509AuthenticationToken token, PkiRealm realm) { @@ -181,38 +190,44 @@ private AuthenticationResult authenticate(X509AuthenticationToken token, PkiReal return future.actionGet(); } - public void testCustomUsernamePattern() throws Exception { + public void testCustomUsernamePatternMatches() throws Exception { final Settings settings = Settings.builder() .put(globalSettings) .put("xpack.security.authc.realms.pki.my_pki.username_pattern", "OU=(.*?),") .build(); ThreadContext threadContext = new ThreadContext(settings); X509Certificate certificate = readCert(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); - UserRoleMapper roleMapper = mock(UserRoleMapper.class); - PkiRealm realm = new PkiRealm(new RealmConfig(new RealmConfig.RealmIdentifier("pki", "my_pki"), settings, - TestEnvironment.newEnvironment(settings), threadContext), roleMapper); - realm.initialize(Collections.emptyList(), licenseState); - Mockito.doAnswer(invocation -> { - ActionListener> listener = (ActionListener>) invocation.getArguments()[1]; - listener.onResponse(Collections.emptySet()); - return null; - }).when(roleMapper).resolveRoles(any(UserRoleMapper.UserData.class), any(ActionListener.class)); + UserRoleMapper roleMapper = buildRoleMapper(); + PkiRealm realm = buildRealm(roleMapper, settings); threadContext.putTransient(PkiRealm.PKI_CERT_HEADER_NAME, new X509Certificate[] { certificate }); 
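/*
 * [illustrative aside] The username_pattern settings exercised in these tests are plain
 * regexes run against the certificate's Subject DN; group(1) of the first match becomes
 * the principal. The default pattern is "CN=(.*?)(?:,|$)" (the first CN component,
 * matched case-insensitively by the realm), which is why a deliberately mismatching
 * pattern such as "OU=(mismatch.*?)," makes token() return null. A self-contained
 * sketch of the extraction logic:
 */
import java.util.regex.Matcher;
import java.util.regex.Pattern;

final class PrincipalExtractionSketch {
    static String extract(String dn, String patternString) {
        Matcher matcher = Pattern.compile(patternString).matcher(dn);
        if (!matcher.find()) {
            return null; // no match: the realm cannot produce a principal for this DN
        }
        String principal = matcher.group(1);
        return principal.isEmpty() ? null : principal; // mirrors the Strings.isNullOrEmpty check
    }

    public static void main(String[] args) {
        String dn = "CN=Elasticsearch Test Node, OU=elasticsearch, O=org";
        assert "Elasticsearch Test Node".equals(extract(dn, "CN=(.*?)(?:,|$)"));
        assert "elasticsearch".equals(extract(dn, "OU=(.*?),"));
        assert extract(dn, "OU=(mismatch.*?),") == null;
    }
}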
X509AuthenticationToken token = realm.token(threadContext); - PlainActionFuture future = new PlainActionFuture<>(); - realm.authenticate(token, future); - User user = future.actionGet().getUser(); + User user = authenticate(token, realm).getUser(); assertThat(user, is(notNullValue())); assertThat(user.principal(), is("elasticsearch")); assertThat(user.roles(), is(notNullValue())); assertThat(user.roles().length, is(0)); } + public void testCustomUsernamePatternMismatchesAndNullToken() throws Exception { + final Settings settings = Settings.builder() + .put(globalSettings) + .put("xpack.security.authc.realms.pki.my_pki.username_pattern", "OU=(mismatch.*?),") + .build(); + ThreadContext threadContext = new ThreadContext(settings); + X509Certificate certificate = readCert(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); + UserRoleMapper roleMapper = buildRoleMapper(); + PkiRealm realm = buildRealm(roleMapper, settings); + threadContext.putTransient(PkiRealm.PKI_CERT_HEADER_NAME, new X509Certificate[] { certificate }); + + X509AuthenticationToken token = realm.token(threadContext); + assertThat(token, is(nullValue())); + } + public void testVerificationUsingATruststore() throws Exception { X509Certificate certificate = readCert(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); - UserRoleMapper roleMapper = mock(UserRoleMapper.class); + UserRoleMapper roleMapper = buildRoleMapper(); MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString("xpack.security.authc.realms.pki.my_pki.truststore.secure_password", "testnode"); Settings settings = Settings.builder() @@ -222,21 +237,12 @@ public void testVerificationUsingATruststore() throws Exception { .setSecureSettings(secureSettings) .build(); ThreadContext threadContext = new ThreadContext(globalSettings); - PkiRealm realm = new PkiRealm(new RealmConfig(new RealmConfig.RealmIdentifier("pki", "my_pki"), settings, - TestEnvironment.newEnvironment(globalSettings), threadContext), roleMapper); - realm.initialize(Collections.emptyList(), licenseState); - Mockito.doAnswer(invocation -> { - ActionListener> listener = (ActionListener>) invocation.getArguments()[1]; - listener.onResponse(Collections.emptySet()); - return null; - }).when(roleMapper).resolveRoles(any(UserRoleMapper.UserData.class), any(ActionListener.class)); + PkiRealm realm = buildRealm(roleMapper, settings); threadContext.putTransient(PkiRealm.PKI_CERT_HEADER_NAME, new X509Certificate[] { certificate }); X509AuthenticationToken token = realm.token(threadContext); - PlainActionFuture future = new PlainActionFuture<>(); - realm.authenticate(token, future); - User user = future.actionGet().getUser(); + User user = authenticate(token, realm).getUser(); assertThat(user, is(notNullValue())); assertThat(user.principal(), is("Elasticsearch Test Node")); assertThat(user.roles(), is(notNullValue())); @@ -245,32 +251,25 @@ public void testVerificationUsingATruststore() throws Exception { public void testVerificationFailsUsingADifferentTruststore() throws Exception { X509Certificate certificate = readCert(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); - UserRoleMapper roleMapper = mock(UserRoleMapper.class); + UserRoleMapper roleMapper = buildRoleMapper(); MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString("xpack.security.authc.realms.pki.mypki.truststore.secure_password", "testnode-client-profile"); + 
secureSettings.setString("xpack.security.authc.realms.pki.my_pki.truststore.secure_password", "testnode-client-profile"); Settings settings = Settings.builder() .put(globalSettings) - .put("xpack.security.authc.realms.pki.mypki.truststore.path", + .put("xpack.security.authc.realms.pki.my_pki.truststore.path", getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-client-profile.jks")) .setSecureSettings(secureSettings) .build(); - final ThreadContext threadContext = new ThreadContext(settings); - PkiRealm realm = new PkiRealm(new RealmConfig(new RealmConfig.RealmIdentifier("pki", "mypki"), settings, - TestEnvironment.newEnvironment(settings), threadContext), roleMapper); - realm.initialize(Collections.emptyList(), licenseState); - Mockito.doAnswer(invocation -> { - ActionListener> listener = (ActionListener>) invocation.getArguments()[1]; - listener.onResponse(Collections.emptySet()); - return null; - }).when(roleMapper).resolveRoles(any(UserRoleMapper.UserData.class), any(ActionListener.class)); + ThreadContext threadContext = new ThreadContext(settings); + PkiRealm realm = buildRealm(roleMapper, settings); threadContext.putTransient(PkiRealm.PKI_CERT_HEADER_NAME, new X509Certificate[] { certificate }); X509AuthenticationToken token = realm.token(threadContext); - PlainActionFuture future = new PlainActionFuture<>(); - realm.authenticate(token, future); - User user = future.actionGet().getUser(); - assertThat(user, is(nullValue())); + AuthenticationResult result = authenticate(token, realm); + assertThat(result.getStatus(), equalTo(AuthenticationResult.Status.CONTINUE)); + assertThat(result.getMessage(), containsString("not trusted")); + assertThat(result.getUser(), is(nullValue())); } public void testTruststorePathWithoutPasswordThrowsException() throws Exception { @@ -306,11 +305,13 @@ public void testCertificateWithOnlyCnExtractsProperly() throws Exception { X500Principal principal = new X500Principal("CN=PKI Client"); when(certificate.getSubjectX500Principal()).thenReturn(principal); - X509AuthenticationToken token = PkiRealm.token(new X509Certificate[]{certificate}, - Pattern.compile(PkiRealmSettings.DEFAULT_USERNAME_PATTERN), NoOpLogger.INSTANCE); + X509AuthenticationToken token = new X509AuthenticationToken(new X509Certificate[]{certificate}); assertThat(token, notNullValue()); - assertThat(token.principal(), is("PKI Client")); assertThat(token.dn(), is("CN=PKI Client")); + + String parsedPrincipal = PkiRealm.getPrincipalFromSubjectDN(Pattern.compile(PkiRealmSettings.DEFAULT_USERNAME_PATTERN), token, + NoOpLogger.INSTANCE); + assertThat(parsedPrincipal, is("PKI Client")); } public void testCertificateWithCnAndOuExtractsProperly() throws Exception { @@ -318,11 +319,13 @@ public void testCertificateWithCnAndOuExtractsProperly() throws Exception { X500Principal principal = new X500Principal("CN=PKI Client, OU=Security"); when(certificate.getSubjectX500Principal()).thenReturn(principal); - X509AuthenticationToken token = PkiRealm.token(new X509Certificate[]{certificate}, - Pattern.compile(PkiRealmSettings.DEFAULT_USERNAME_PATTERN), NoOpLogger.INSTANCE); + X509AuthenticationToken token = new X509AuthenticationToken(new X509Certificate[]{certificate}); assertThat(token, notNullValue()); - assertThat(token.principal(), is("PKI Client")); assertThat(token.dn(), is("CN=PKI Client, OU=Security")); + + String parsedPrincipal = PkiRealm.getPrincipalFromSubjectDN(Pattern.compile(PkiRealmSettings.DEFAULT_USERNAME_PATTERN), token, + NoOpLogger.INSTANCE); + 
assertThat(parsedPrincipal, is("PKI Client")); } public void testCertificateWithCnInMiddle() throws Exception { @@ -330,11 +333,13 @@ public void testCertificateWithCnInMiddle() throws Exception { X500Principal principal = new X500Principal("EMAILADDRESS=pki@elastic.co, CN=PKI Client, OU=Security"); when(certificate.getSubjectX500Principal()).thenReturn(principal); - X509AuthenticationToken token = PkiRealm.token(new X509Certificate[]{certificate}, - Pattern.compile(PkiRealmSettings.DEFAULT_USERNAME_PATTERN), NoOpLogger.INSTANCE); + X509AuthenticationToken token = new X509AuthenticationToken(new X509Certificate[]{certificate}); assertThat(token, notNullValue()); - assertThat(token.principal(), is("PKI Client")); assertThat(token.dn(), is("EMAILADDRESS=pki@elastic.co, CN=PKI Client, OU=Security")); + + String parsedPrincipal = PkiRealm.getPrincipalFromSubjectDN(Pattern.compile(PkiRealmSettings.DEFAULT_USERNAME_PATTERN), token, + NoOpLogger.INSTANCE); + assertThat(parsedPrincipal, is("PKI Client")); } public void testPKIRealmSettingsPassValidation() throws Exception { @@ -355,10 +360,12 @@ public void testPKIRealmSettingsPassValidation() throws Exception { public void testDelegatedAuthorization() throws Exception { final X509AuthenticationToken token = buildToken(); + String parsedPrincipal = PkiRealm.getPrincipalFromSubjectDN(Pattern.compile(PkiRealmSettings.DEFAULT_USERNAME_PATTERN), token, + NoOpLogger.INSTANCE); final MockLookupRealm otherRealm = new MockLookupRealm(new RealmConfig(new RealmConfig.RealmIdentifier("mock", "other_realm"), globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings))); - final User lookupUser = new User(token.principal()); + final User lookupUser = new User(parsedPrincipal); otherRealm.registerUser(lookupUser); final Settings realmSettings = Settings.builder() @@ -373,7 +380,7 @@ public void testDelegatedAuthorization() throws Exception { assertThat(result.getUser(), sameInstance(lookupUser)); // check that the authorizing realm is consulted even for cached principals - final User lookupUser2 = new User(token.principal()); + final User lookupUser2 = new User(parsedPrincipal); otherRealm.registerUser(lookupUser2); result = authenticate(token, pkiRealm); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTests.java index 52961b52e3815..824655d59c7e5 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTests.java @@ -732,7 +732,7 @@ private static List getProtocols() { } else { JavaVersion full = AccessController.doPrivileged( - (PrivilegedAction) () -> JavaVersion.parse(System.getProperty("java.specification.version"))); + (PrivilegedAction) () -> JavaVersion.parse(System.getProperty("java.version"))); if (full.compareTo(JavaVersion.parse("12.0.1")) < 0) { return List.of("TLSv1.2"); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/AbstractSimpleSecurityTransportTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/AbstractSimpleSecurityTransportTestCase.java index 80802db88f942..468b889d4fd21 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/AbstractSimpleSecurityTransportTestCase.java 
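/*
 * [illustrative aside] The System.getProperty swap in the SamlRealmTests hunk above (and
 * in SSLClientAuthTests further down) is a correctness fix: "java.specification.version"
 * only carries the major version (e.g. "12"), so comparing it against a patch-level
 * floor like "12.0.1" or "11.0.3" cannot distinguish 12.0.0 from 12.0.1. "java.version"
 * carries the full version string, which the TLSv1.3 gate in these tests needs:
 */
final class JavaVersionPropertySketch {
    public static void main(String[] args) {
        System.out.println(System.getProperty("java.specification.version")); // e.g. "12"
        System.out.println(System.getProperty("java.version"));               // e.g. "12.0.1"
        // The tests only enable TLSv1.3 at or above the patch releases named in the
        // hunks (11.0.3 / 12.0.1), so they require the patch component.
    }
}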
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/AbstractSimpleSecurityTransportTestCase.java @@ -20,7 +20,6 @@ import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.test.transport.StubbableTransport; import org.elasticsearch.transport.AbstractSimpleTransportTestCase; -import org.elasticsearch.transport.BindTransportException; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.ConnectionProfile; import org.elasticsearch.transport.TcpChannel; @@ -28,7 +27,6 @@ import org.elasticsearch.transport.TestProfiles; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.transport.TransportSettings; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.common.socket.SocketAccess; import org.elasticsearch.xpack.core.ssl.SSLClientAuth; @@ -46,7 +44,6 @@ import javax.net.ssl.SSLServerSocket; import javax.net.ssl.SSLServerSocketFactory; import javax.net.ssl.SSLSocket; - import java.io.IOException; import java.io.UncheckedIOException; import java.net.InetAddress; @@ -117,24 +114,7 @@ public void testConnectException() throws UnknownHostException { Throwable cause = e.getCause(); assertThat(cause, instanceOf(IOException.class)); } - } - public void testBindUnavailableAddress() { - // this is on a lower level since it needs access to the TransportService before it's started - int port = serviceA.boundAddress().publishAddress().getPort(); - Settings settings = Settings.builder() - .put(TransportSettings.PORT.getKey(), port) - .build(); - BindTransportException bindTransportException = expectThrows(BindTransportException.class, () -> { - MockTransportService transportService = buildService("TS_C", Version.CURRENT, settings); - try { - transportService.start(); - } finally { - transportService.stop(); - transportService.close(); - } - }); - assertEquals("Failed to bind to [" + port + "]", bindTransportException.getMessage()); } @Override diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SimpleSecurityNetty4ServerTransportTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SimpleSecurityNetty4ServerTransportTests.java index f70a286efe055..f2fea6f5c7eaf 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SimpleSecurityNetty4ServerTransportTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SimpleSecurityNetty4ServerTransportTests.java @@ -14,26 +14,23 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; -import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ConnectionProfile; import org.elasticsearch.transport.TcpChannel; import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportSettings; import org.elasticsearch.xpack.security.transport.AbstractSimpleSecurityTransportTestCase; import java.util.Collections; public class SimpleSecurityNetty4ServerTransportTests extends AbstractSimpleSecurityTransportTestCase { - public MockTransportService nettyFromThreadPool(Settings settings, ThreadPool threadPool, final Version version, - ClusterSettings clusterSettings, 
boolean doHandshake) { + @Override + protected Transport build(Settings settings, final Version version, ClusterSettings clusterSettings, boolean doHandshake) { NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); NetworkService networkService = new NetworkService(Collections.emptyList()); Settings settings1 = Settings.builder() .put(settings) .put("xpack.security.transport.ssl.enabled", true).build(); - Transport transport = new SecurityNetty4ServerTransport(settings1, version, threadPool, + return new SecurityNetty4ServerTransport(settings1, version, threadPool, networkService, PageCacheRecycler.NON_RECYCLING_INSTANCE, namedWriteableRegistry, new NoneCircuitBreakerService(), null, createSSLService(settings1)) { @@ -47,22 +44,5 @@ public void executeHandshake(DiscoveryNode node, TcpChannel channel, ConnectionP } } }; - MockTransportService mockTransportService = - MockTransportService.createNewService(settings, transport, version, threadPool, clusterSettings, - Collections.emptySet()); - mockTransportService.start(); - return mockTransportService; - } - - @Override - protected MockTransportService build(Settings settings, Version version, ClusterSettings clusterSettings, boolean doHandshake) { - if (TransportSettings.PORT.exists(settings) == false) { - settings = Settings.builder().put(settings) - .put(TransportSettings.PORT.getKey(), "0") - .build(); - } - MockTransportService transportService = nettyFromThreadPool(settings, threadPool, version, clusterSettings, doHandshake); - transportService.start(); - return transportService; } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/NioIPFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/NioIPFilterTests.java index e7612c0c0d7fa..842c9f031eff9 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/NioIPFilterTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/NioIPFilterTests.java @@ -82,8 +82,8 @@ public void testThatFilterCanPass() throws Exception { InetSocketAddress localhostAddr = new InetSocketAddress(InetAddresses.forString("127.0.0.1"), 12345); NioChannelHandler delegate = mock(NioChannelHandler.class); NioIPFilter nioIPFilter = new NioIPFilter(delegate, localhostAddr, ipFilter, profile); - nioIPFilter.channelRegistered(); - verify(delegate).channelRegistered(); + nioIPFilter.channelActive(); + verify(delegate).channelActive(); assertFalse(nioIPFilter.closeNow()); } @@ -91,8 +91,8 @@ public void testThatFilterCanFail() throws Exception { InetSocketAddress localhostAddr = new InetSocketAddress(InetAddresses.forString("10.0.0.8"), 12345); NioChannelHandler delegate = mock(NioChannelHandler.class); NioIPFilter nioIPFilter = new NioIPFilter(delegate, localhostAddr, ipFilter, profile); - nioIPFilter.channelRegistered(); - verify(delegate, times(0)).channelRegistered(); + nioIPFilter.channelActive(); + verify(delegate, times(0)).channelActive(); assertTrue(nioIPFilter.closeNow()); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SSLChannelContextTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SSLChannelContextTests.java index 7efff1c0e26b9..8e0a5ad23af31 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SSLChannelContextTests.java +++ 
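/*
 * [illustrative aside, shape inferred from the removed code] Both secure transport test
 * classes drop their bespoke "build a Transport, wrap it in a started
 * MockTransportService" helpers; the override they keep is just the Transport factory.
 * The wrapping and startup presumably now happen once in the shared base test case,
 * roughly mirroring the MockTransportService.createNewService(...) calls deleted above:
 */
import java.util.Collections;

import org.elasticsearch.Version;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.transport.MockTransportService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.Transport;

abstract class TransportTestCaseSketch {
    protected ThreadPool threadPool;

    // subclasses only say how to construct the transport under test ...
    protected abstract Transport build(Settings settings, Version version,
                                       ClusterSettings clusterSettings, boolean doHandshake);

    // ... while the service wrapping each subclass used to repeat lives in one place
    protected MockTransportService buildService(Settings settings, Version version,
                                                ClusterSettings clusterSettings) {
        Transport transport = build(settings, version, clusterSettings, true);
        MockTransportService service = MockTransportService.createNewService(
            settings, transport, version, threadPool, clusterSettings, Collections.emptySet());
        service.start();
        return service;
    }
}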
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SSLChannelContextTests.java @@ -24,6 +24,7 @@ import javax.net.ssl.SSLException; import java.io.IOException; import java.nio.ByteBuffer; +import java.nio.channels.SelectionKey; import java.nio.channels.Selector; import java.nio.channels.SocketChannel; import java.util.function.BiConsumer; @@ -73,6 +74,7 @@ public void init() { when(channel.getRawChannel()).thenReturn(rawChannel); exceptionHandler = mock(Consumer.class); context = new SSLChannelContext(channel, selector, exceptionHandler, sslDriver, readWriteHandler, channelBuffer); + context.setSelectionKey(mock(SelectionKey.class)); when(selector.isOnCurrentThread()).thenReturn(true); when(selector.getTaskScheduler()).thenReturn(nioTimer); @@ -331,6 +333,7 @@ public void testCloseTimeoutIsCancelledOnClose() throws IOException { when(channel.getRawChannel()).thenReturn(realChannel); TestReadWriteHandler readWriteHandler = new TestReadWriteHandler(readConsumer); context = new SSLChannelContext(channel, selector, exceptionHandler, sslDriver, readWriteHandler, channelBuffer); + context.setSelectionKey(mock(SelectionKey.class)); context.closeChannel(); ArgumentCaptor captor = ArgumentCaptor.forClass(WriteOperation.class); verify(selector).queueWrite(captor.capture()); @@ -345,29 +348,25 @@ public void testCloseTimeoutIsCancelledOnClose() throws IOException { } } - public void testInitiateCloseFromDifferentThreadSchedulesCloseNotify() throws SSLException { - when(selector.isOnCurrentThread()).thenReturn(false, true); + public void testInitiateCloseSchedulesCloseNotify() throws SSLException { context.closeChannel(); - ArgumentCaptor captor = ArgumentCaptor.forClass(FlushReadyWrite.class); + ArgumentCaptor captor = ArgumentCaptor.forClass(WriteOperation.class); verify(selector).queueWrite(captor.capture()); context.queueWriteOperation(captor.getValue()); verify(sslDriver).initiateClose(); } - public void testInitiateCloseFromSameThreadSchedulesCloseNotify() throws SSLException { + public void testInitiateUnregisteredScheduledDirectClose() throws SSLException { + context.setSelectionKey(null); context.closeChannel(); - ArgumentCaptor captor = ArgumentCaptor.forClass(WriteOperation.class); - verify(selector).queueWrite(captor.capture()); - - context.queueWriteOperation(captor.getValue()); - verify(sslDriver).initiateClose(); + verify(selector).queueChannelClose(channel); } @SuppressWarnings("unchecked") - public void testRegisterInitiatesDriver() throws IOException { + public void testActiveInitiatesDriver() throws IOException { try (Selector realSelector = Selector.open(); SocketChannel realSocket = SocketChannel.open()) { realSocket.configureBlocking(false); @@ -375,7 +374,7 @@ public void testRegisterInitiatesDriver() throws IOException { when(channel.getRawChannel()).thenReturn(realSocket); TestReadWriteHandler readWriteHandler = new TestReadWriteHandler(readConsumer); context = new SSLChannelContext(channel, selector, exceptionHandler, sslDriver, readWriteHandler, channelBuffer); - context.register(); + context.channelActive(); verify(sslDriver).init(); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SimpleSecurityNioTransportTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SimpleSecurityNioTransportTests.java index a30e1329432db..ae97d2836e8a5 100644 --- 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SimpleSecurityNioTransportTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SimpleSecurityNioTransportTests.java @@ -14,12 +14,9 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.MockPageCacheRecycler; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; -import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ConnectionProfile; import org.elasticsearch.transport.TcpChannel; import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportSettings; import org.elasticsearch.transport.nio.NioGroupFactory; import org.elasticsearch.xpack.security.transport.AbstractSimpleSecurityTransportTestCase; @@ -27,14 +24,14 @@ public class SimpleSecurityNioTransportTests extends AbstractSimpleSecurityTransportTestCase { - public MockTransportService nioFromThreadPool(Settings settings, ThreadPool threadPool, final Version version, - ClusterSettings clusterSettings, boolean doHandshake) { + @Override + protected Transport build(Settings settings, final Version version, ClusterSettings clusterSettings, boolean doHandshake) { NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); NetworkService networkService = new NetworkService(Collections.emptyList()); Settings settings1 = Settings.builder() .put(settings) .put("xpack.security.transport.ssl.enabled", true).build(); - Transport transport = new SecurityNioTransport(settings1, version, threadPool, networkService, new MockPageCacheRecycler(settings), + return new SecurityNioTransport(settings1, version, threadPool, networkService, new MockPageCacheRecycler(settings), namedWriteableRegistry, new NoneCircuitBreakerService(), null, createSSLService(settings1), new NioGroupFactory(settings, logger)) { @@ -48,22 +45,5 @@ public void executeHandshake(DiscoveryNode node, TcpChannel channel, ConnectionP } } }; - MockTransportService mockTransportService = - MockTransportService.createNewService(settings, transport, version, threadPool, clusterSettings, - Collections.emptySet()); - mockTransportService.start(); - return mockTransportService; - } - - @Override - protected MockTransportService build(Settings settings, Version version, ClusterSettings clusterSettings, boolean doHandshake) { - if (TransportSettings.PORT.exists(settings) == false) { - settings = Settings.builder().put(settings) - .put(TransportSettings.PORT.getKey(), "0") - .build(); - } - MockTransportService transportService = nioFromThreadPool(settings, threadPool, version, clusterSettings, doHandshake); - transportService.start(); - return transportService; } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java index 7d2bcf1232519..a50d1b5a9b818 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java @@ -164,7 +164,7 @@ private byte[] toByteArray(InputStream is) throws IOException { private static List getProtocols() { JavaVersion full = AccessController.doPrivileged( - (PrivilegedAction) () -> JavaVersion.parse(System.getProperty("java.specification.version"))); + 
(PrivilegedAction) () -> JavaVersion.parse(System.getProperty("java.version"))); if (full.compareTo(JavaVersion.parse("11.0.3")) < 0) { return List.of("TLSv1.2"); } diff --git a/x-pack/plugin/spatial/build.gradle b/x-pack/plugin/spatial/build.gradle new file mode 100644 index 0000000000000..068ddd2b97069 --- /dev/null +++ b/x-pack/plugin/spatial/build.gradle @@ -0,0 +1,23 @@ +evaluationDependsOn(xpackModule('core')) + +apply plugin: 'elasticsearch.esplugin' + +esplugin { + name 'spatial' + description 'A plugin for Basic Spatial features' + classname 'org.elasticsearch.xpack.spatial.SpatialPlugin' + extendedPlugins = ['x-pack-core'] +} + +dependencies { + compileOnly project(path: xpackModule('core'), configuration: 'default') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + if (isEclipse) { + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } +} + +// xpack modules are installed in real clusters as the meta plugin, so +// installing them as individual plugins for integ tests doesn't make sense, +// so we disable integ tests +integTest.enabled = false diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/SpatialInfoTransportAction.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/SpatialInfoTransportAction.java new file mode 100644 index 0000000000000..c701d5b4adb5d --- /dev/null +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/SpatialInfoTransportAction.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.spatial; + +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.action.XPackInfoFeatureAction; +import org.elasticsearch.xpack.core.action.XPackInfoFeatureTransportAction; + +public class SpatialInfoTransportAction extends XPackInfoFeatureTransportAction { + + private final XPackLicenseState licenseState; + + @Inject + public SpatialInfoTransportAction(TransportService transportService, ActionFilters actionFilters, + Settings settings, XPackLicenseState licenseState) { + super(XPackInfoFeatureAction.SPATIAL.name(), transportService, actionFilters); + this.licenseState = licenseState; + } + + @Override + public String name() { + return XPackField.SPATIAL; + } + + @Override + public boolean available() { + return licenseState.isSpatialAllowed(); + } + + @Override + public boolean enabled() { + return true; + } + +} diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/SpatialPlugin.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/SpatialPlugin.java new file mode 100644 index 0000000000000..b3f72e12afc2a --- /dev/null +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/SpatialPlugin.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.spatial; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.xpack.core.action.XPackInfoFeatureAction; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; + +import java.util.Arrays; +import java.util.List; + +public class SpatialPlugin extends Plugin implements ActionPlugin { + + public SpatialPlugin(Settings settings) { + } + + @Override + public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() { + return Arrays.asList( + new ActionPlugin.ActionHandler<>(XPackUsageFeatureAction.SPATIAL, SpatialUsageTransportAction.class), + new ActionPlugin.ActionHandler<>(XPackInfoFeatureAction.SPATIAL, SpatialInfoTransportAction.class)); + } +} diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/SpatialUsageTransportAction.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/SpatialUsageTransportAction.java new file mode 100644 index 0000000000000..a2873a2bcf938 --- /dev/null +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/SpatialUsageTransportAction.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.spatial; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.protocol.xpack.XPackUsageRequest; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureTransportAction; +import org.elasticsearch.xpack.core.spatial.SpatialFeatureSetUsage; + +public class SpatialUsageTransportAction extends XPackUsageFeatureTransportAction { + + private final Settings settings; + private final XPackLicenseState licenseState; + + @Inject + public SpatialUsageTransportAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + Settings settings, XPackLicenseState licenseState) { + super(XPackUsageFeatureAction.SPATIAL.name(), transportService, clusterService, + threadPool, actionFilters, indexNameExpressionResolver); + this.settings = settings; + this.licenseState = licenseState; + } + + @Override + protected void masterOperation(Task task, XPackUsageRequest request, ClusterState state, + ActionListener<XPackUsageFeatureResponse> listener) { + SpatialFeatureSetUsage usage = new SpatialFeatureSetUsage(licenseState.isSpatialAllowed(), true); + listener.onResponse(new
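/*
 * Registering the SPATIAL usage and info handlers through ActionPlugin#getActions
 * is what surfaces the new module in the _xpack info and usage APIs. A rough
 * sketch of the fragment this masterOperation contributes to the usage response
 * (field names assumed from XPackFeatureSet.Usage, not verified against the
 * actual wire format):
 *
 *   "spatial" : { "available" : <license allows spatial>, "enabled" : true }
 *
 * available() follows XPackLicenseState, while enabled() is hard-wired to true
 * because the plugin exposes no enable/disable setting of its own.
 */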
XPackUsageFeatureResponse(usage)); + } +} diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/SpatialInfoTransportActionTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/SpatialInfoTransportActionTests.java new file mode 100644 index 0000000000000..dbe0674eef746 --- /dev/null +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/SpatialInfoTransportActionTests.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.spatial; + +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; +import org.elasticsearch.xpack.core.spatial.SpatialFeatureSetUsage; +import org.junit.Before; + +import static org.hamcrest.core.Is.is; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SpatialInfoTransportActionTests extends ESTestCase { + + private XPackLicenseState licenseState; + + @Before + public void init() { + licenseState = mock(XPackLicenseState.class); + } + + public void testAvailable() throws Exception { + SpatialInfoTransportAction featureSet = new SpatialInfoTransportAction( + mock(TransportService.class), mock(ActionFilters.class), Settings.EMPTY, licenseState); + boolean available = randomBoolean(); + when(licenseState.isSpatialAllowed()).thenReturn(available); + assertThat(featureSet.available(), is(available)); + + var usageAction = new SpatialUsageTransportAction(mock(TransportService.class), null, null, + mock(ActionFilters.class), null, Settings.EMPTY, licenseState); + PlainActionFuture<XPackUsageFeatureResponse> future = new PlainActionFuture<>(); + usageAction.masterOperation(null, null, null, future); + XPackFeatureSet.Usage usage = future.get().getUsage(); + assertThat(usage.available(), is(available)); + + BytesStreamOutput out = new BytesStreamOutput(); + usage.writeTo(out); + XPackFeatureSet.Usage serializedUsage = new SpatialFeatureSetUsage(out.bytes().streamInput()); + assertThat(serializedUsage.available(), is(available)); + } + + public void testEnabled() throws Exception { + Settings.Builder settings = Settings.builder(); + SpatialInfoTransportAction featureSet = new SpatialInfoTransportAction( + mock(TransportService.class), mock(ActionFilters.class), settings.build(), licenseState); + assertThat(featureSet.enabled(), is(true)); + + SpatialUsageTransportAction usageAction = new SpatialUsageTransportAction(mock(TransportService.class), + null, null, mock(ActionFilters.class), null, settings.build(), licenseState); + PlainActionFuture<XPackUsageFeatureResponse> future = new PlainActionFuture<>(); + usageAction.masterOperation(null, null, null, future); + XPackFeatureSet.Usage usage = future.get().getUsage(); + assertTrue(usage.enabled()); + + BytesStreamOutput out = new BytesStreamOutput(); + usage.writeTo(out); +
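/*
 * This is the standard round-trip check for Writeable objects: serialize into a
 * BytesStreamOutput, rebuild from the resulting StreamInput, and assert that the
 * copy agrees. A generic sketch of the idiom (copy() is a hypothetical helper,
 * not part of the test base class):
 *
 *   static <T extends Writeable> T copy(T value, Writeable.Reader<T> reader) throws IOException {
 *       BytesStreamOutput out = new BytesStreamOutput();
 *       value.writeTo(out);
 *       return reader.read(out.bytes().streamInput());
 *   }
 */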
XPackFeatureSet.Usage serializedUsage = new SpatialFeatureSetUsage(out.bytes().streamInput()); + assertTrue(serializedUsage.enabled()); + } + +} diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java index 0a1c0826695bd..82615662d7147 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java @@ -5,7 +5,7 @@ */ package org.elasticsearch.xpack.sql.jdbc; -import org.elasticsearch.geo.utils.GeographyValidator; +import org.elasticsearch.geo.utils.StandardValidator; import org.elasticsearch.geo.utils.WellKnownText; import org.elasticsearch.xpack.sql.proto.StringUtils; @@ -55,7 +55,7 @@ */ final class TypeConverter { - private static WellKnownText WKT = new WellKnownText(true, new GeographyValidator(true)); + private static WellKnownText WKT = new WellKnownText(true, new StandardValidator(true)); private TypeConverter() {} diff --git a/x-pack/plugin/sql/qa/build.gradle b/x-pack/plugin/sql/qa/build.gradle index 89f04562fffb2..f33fd4a430312 100644 --- a/x-pack/plugin/sql/qa/build.gradle +++ b/x-pack/plugin/sql/qa/build.gradle @@ -98,7 +98,7 @@ subprojects { apply plugin: 'elasticsearch.rest-test' testClusters.integTest { - distribution = 'DEFAULT' + testDistribution = 'DEFAULT' setting 'xpack.monitoring.enabled', 'false' setting 'xpack.ml.enabled', 'false' setting 'xpack.watcher.enabled', 'false' diff --git a/x-pack/plugin/sql/qa/no-sql/build.gradle b/x-pack/plugin/sql/qa/no-sql/build.gradle new file mode 100644 index 0000000000000..d0f8a3007c4be --- /dev/null +++ b/x-pack/plugin/sql/qa/no-sql/build.gradle @@ -0,0 +1,5 @@ +testClusters.integTest { + setting 'xpack.security.enabled', 'false' + setting 'xpack.sql.enabled', 'false' + setting 'xpack.license.self_generated.type', 'trial' +} diff --git a/x-pack/plugin/sql/qa/no-sql/src/test/java/org/elasticsearch/xpack/sql/qa/no_sql/JdbcNoSqlIT.java b/x-pack/plugin/sql/qa/no-sql/src/test/java/org/elasticsearch/xpack/sql/qa/no_sql/JdbcNoSqlIT.java new file mode 100644 index 0000000000000..c1c59877fdcc2 --- /dev/null +++ b/x-pack/plugin/sql/qa/no-sql/src/test/java/org/elasticsearch/xpack/sql/qa/no_sql/JdbcNoSqlIT.java @@ -0,0 +1,13 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ + +package org.elasticsearch.xpack.sql.qa.no_sql; + +import org.elasticsearch.xpack.sql.qa.jdbc.JdbcNoSqlTestCase; + +public class JdbcNoSqlIT extends JdbcNoSqlTestCase { + +} diff --git a/x-pack/plugin/sql/qa/security/build.gradle b/x-pack/plugin/sql/qa/security/build.gradle index 2cf410ed3d908..2774c4b85f4a5 100644 --- a/x-pack/plugin/sql/qa/security/build.gradle +++ b/x-pack/plugin/sql/qa/security/build.gradle @@ -30,7 +30,7 @@ subprojects { } testClusters.integTest { - distribution = "DEFAULT" + testDistribution = 'DEFAULT' // Setup auditing so we can use it in some tests setting 'xpack.security.audit.enabled', 'true' setting 'xpack.security.enabled', 'true' diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java index 256d7cb612cf0..e26313cbad9a5 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java @@ -10,7 +10,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.geo.geometry.Geometry; import org.elasticsearch.geo.geometry.Point; -import org.elasticsearch.geo.utils.GeographyValidator; +import org.elasticsearch.geo.utils.StandardValidator; import org.elasticsearch.geo.utils.WellKnownText; import org.elasticsearch.xpack.sql.jdbc.EsType; import org.elasticsearch.xpack.sql.proto.StringUtils; @@ -52,7 +52,7 @@ public class JdbcAssert { private static final IntObjectHashMap SQL_TO_TYPE = new IntObjectHashMap<>(); - private static final WellKnownText WKT = new WellKnownText(true, new GeographyValidator(true)); + private static final WellKnownText WKT = new WellKnownText(true, new StandardValidator(true)); static { for (EsType type : EsType.values()) { diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcNoSqlTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcNoSqlTestCase.java new file mode 100644 index 0000000000000..1b4e37dbc2b8c --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcNoSqlTestCase.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.sql.qa.jdbc; + +import java.sql.Connection; +import java.sql.SQLException; + +public class JdbcNoSqlTestCase extends JdbcIntegrationTestCase { + + public void testJdbcExceptionMessage() throws SQLException { + try (Connection c = esJdbc()) { + SQLException e = expectThrows(SQLException.class, () -> c.prepareStatement("SELECT * FROM bla").executeQuery()); + assertTrue(e.getMessage().startsWith("X-Pack/SQL does not seem to be available on the Elasticsearch" + + " node using the access path")); + } + } +} diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java index 7c7288d6a3539..e352e63d70dcc 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java @@ -15,6 +15,7 @@ import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.xcontent.XContentHelper; @@ -414,6 +415,85 @@ protected Map runSql(HttpEntity sql, String suffix) throws IOExc } } + public void testPrettyPrintingEnabled() throws IOException { + boolean columnar = randomBoolean(); + String expected = ""; + if (columnar) { + expected = "{\n" + + " \"columns\" : [\n" + + " {\n" + + " \"name\" : \"test1\",\n" + + " \"type\" : \"text\"\n" + + " }\n" + + " ],\n" + + " \"values\" : [\n" + + " [\n" + + " \"test1\",\n" + + " \"test2\"\n" + + " ]\n" + + " ]\n" + + "}\n"; + } else { + expected = "{\n" + + " \"columns\" : [\n" + + " {\n" + + " \"name\" : \"test1\",\n" + + " \"type\" : \"text\"\n" + + " }\n" + + " ],\n" + + " \"rows\" : [\n" + + " [\n" + + " \"test1\"\n" + + " ],\n" + + " [\n" + + " \"test2\"\n" + + " ]\n" + + " ]\n" + + "}\n"; + } + executeAndAssertPrettyPrinting(expected, "true", columnar); + } + + public void testPrettyPrintingDisabled() throws IOException { + boolean columnar = randomBoolean(); + String expected = ""; + if (columnar) { + expected = "{\"columns\":[{\"name\":\"test1\",\"type\":\"text\"}],\"values\":[[\"test1\",\"test2\"]]}"; + } else { + expected = "{\"columns\":[{\"name\":\"test1\",\"type\":\"text\"}],\"rows\":[[\"test1\"],[\"test2\"]]}"; + } + executeAndAssertPrettyPrinting(expected, randomFrom("false", null), columnar); + } + + private void executeAndAssertPrettyPrinting(String expectedJson, String prettyParameter, boolean columnar) + throws IOException { + index("{\"test1\":\"test1\"}", + "{\"test1\":\"test2\"}"); + + Request request = new Request("POST", SQL_QUERY_REST_ENDPOINT); + if (prettyParameter != null) { + request.addParameter("pretty", prettyParameter); + } + if (randomBoolean()) { + // We default to JSON but we force it randomly for extra coverage + request.addParameter("format", "json"); + } + if (randomBoolean()) { + // JSON is the default but randomly set it sometime for extra coverage + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.addHeader("Accept", randomFrom("*/*", "application/json")); + request.setOptions(options); + } + request.setEntity(new StringEntity("{\"query\":\"SELECT * FROM test\"" + mode("plain") + columnarParameter(columnar) + "}", + ContentType.APPLICATION_JSON)); + + 
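/*
 * The body above leans on two helpers from this test class: mode("plain")
 * appends the client mode field and columnarParameter(columnar) appends the
 * columnar flag. Assuming those helpers emit ,"key":value pairs, the payload
 * sent is roughly:
 *
 *   {"query":"SELECT * FROM test","mode":"plain","columnar":true}
 *
 * so a single test covers both the row-oriented and the column-oriented
 * layouts asserted in the expected strings above.
 */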
Response response = client().performRequest(request); + try (InputStream content = response.getEntity().getContent()) { + String actualJson = new BytesArray(content.readAllBytes()).utf8ToString(); + assertEquals(expectedJson, actualJson); + } + } + public void testBasicTranslateQuery() throws IOException { index("{\"test\":\"test\"}", "{\"test\":\"test\"}"); diff --git a/x-pack/plugin/sql/qa/src/main/resources/geo/geosql.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/geo/geosql.csv-spec index 31f3857216c0b..8ee9a44adff9a 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/geo/geosql.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/geo/geosql.csv-spec @@ -286,3 +286,18 @@ Phoenix |Americas |-111.97350500151515 Chicago |Americas |-87.63787407428026 New York |Americas |-73.9900270756334 ; + +selectLargeLat +SELECT ST_X(ST_WKTToSQL('LINESTRING (200 100, 300 400)')) x; + + x:d +200.0 +; + +selectLargeLon +SELECT ST_Y(ST_WKTToSQL('LINESTRING (200 100, 300 400)')) y; + + y:d +100.0 +// end::y +; \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.1.0.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.1.0.jar.sha1 deleted file mode 100644 index 4a6aa7b098686..0000000000000 --- a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -46d614acdeb42f4661e91347100217bc72aae11e \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.2.0-snapshot-6413aae226.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.2.0-snapshot-6413aae226.jar.sha1 new file mode 100644 index 0000000000000..8e703b4ec6b84 --- /dev/null +++ b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.2.0-snapshot-6413aae226.jar.sha1 @@ -0,0 +1 @@ +79f8f65bf5a536b95a5e1074ba431544a0a73fcb \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/AbstractSqlRequest.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/AbstractSqlRequest.java index ea5cce74ed392..f5965cc0e681f 100644 --- a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/AbstractSqlRequest.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/AbstractSqlRequest.java @@ -51,11 +51,6 @@ public ActionRequestValidationException validate() { return validationException; } - @Override - public final void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlClearCursorAction.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlClearCursorAction.java index 37cc028bf993b..1811698bfd3e4 100644 --- a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlClearCursorAction.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlClearCursorAction.java @@ -5,19 +5,14 @@ */ package org.elasticsearch.xpack.sql.action; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class SqlClearCursorAction extends StreamableResponseActionType { +public class SqlClearCursorAction extends ActionType { public static final SqlClearCursorAction INSTANCE = new 
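/*
 * The shape of the migration applied to all SQL actions below: ActionType now
 * accepts the response deserializer directly, so the StreamableResponseActionType
 * subclass and its newResponse() override collapse into a single super call.
 * A minimal sketch with placeholder names (MyAction/MyResponse are not real
 * classes):
 *
 *   public class MyAction extends ActionType<MyResponse> {
 *       public static final MyAction INSTANCE = new MyAction();
 *       private MyAction() {
 *           super("indices:data/read/my_action", MyResponse::new);
 *       }
 *   }
 */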
SqlClearCursorAction(); public static final String NAME = "indices:data/read/sql/close_cursor"; private SqlClearCursorAction() { - super(NAME); - } - - @Override - public SqlClearCursorResponse newResponse() { - return new SqlClearCursorResponse(); + super(NAME, SqlClearCursorResponse::new); } } diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlClearCursorResponse.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlClearCursorResponse.java index 1a677a713c8e2..4ac4fc9c84e3d 100644 --- a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlClearCursorResponse.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlClearCursorResponse.java @@ -30,7 +30,9 @@ public SqlClearCursorResponse(boolean succeeded) { this.succeeded = succeeded; } - SqlClearCursorResponse() { + SqlClearCursorResponse(StreamInput in) throws IOException { + super(in); + succeeded = in.readBoolean(); } /** @@ -58,12 +60,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - succeeded = in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(succeeded); diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryAction.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryAction.java index 660d0ff19661b..cd1932d7fd18f 100644 --- a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryAction.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryAction.java @@ -5,19 +5,14 @@ */ package org.elasticsearch.xpack.sql.action; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class SqlQueryAction extends StreamableResponseActionType { +public class SqlQueryAction extends ActionType { public static final SqlQueryAction INSTANCE = new SqlQueryAction(); public static final String NAME = "indices:data/read/sql"; private SqlQueryAction() { - super(NAME); - } - - @Override - public SqlQueryResponse newResponse() { - return new SqlQueryResponse(); + super(NAME, SqlQueryResponse::new); } } diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java index 5ba5ec3f232f5..5553b44446057 100644 --- a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java @@ -40,7 +40,34 @@ public class SqlQueryResponse extends ActionResponse implements ToXContentObject private List> rows; private static final String INTERVAL_CLASS_NAME = "Interval"; - public SqlQueryResponse() { + public SqlQueryResponse(StreamInput in) throws IOException { + super(in); + cursor = in.readString(); + if (in.readBoolean()) { + // We might have rows without columns and we might have columns without rows + // So we send the column size twice, just to keep the protocol simple + int columnCount = in.readVInt(); + List columns = new ArrayList<>(columnCount); + for (int c = 0; c < columnCount; c++) { + 
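/*
 * Wire layout consumed by this constructor: the cursor string, a boolean flag
 * for the optional column metadata, then vInt-counted columns and rows. The
 * column count is repeated in front of the row block so that either side can
 * be empty independently, as the comment above notes. Schematically:
 *
 *   cursor | hasColumns | [columnCount, column...] | rowCount | [columnCount, row...]
 */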
columns.add(readColumnInfo(in)); + } + this.columns = unmodifiableList(columns); + } else { + this.columns = null; + } + int rowCount = in.readVInt(); + List> rows = new ArrayList<>(rowCount); + if (rowCount > 0) { + int columnCount = in.readVInt(); + for (int r = 0; r < rowCount; r++) { + List row = new ArrayList<>(columnCount); + for (int c = 0; c < columnCount; c++) { + row.add(in.readGenericValue()); + } + rows.add(unmodifiableList(row)); + } + } + this.rows = unmodifiableList(rows); } public SqlQueryResponse(String cursor, Mode mode, boolean columnar, @Nullable List columns, List> rows) { @@ -90,36 +117,6 @@ public SqlQueryResponse rows(List> rows) { return this; } - @Override - public void readFrom(StreamInput in) throws IOException { - cursor = in.readString(); - if (in.readBoolean()) { - // We might have rows without columns and we might have columns without rows - // So we send the column size twice, just to keep the protocol simple - int columnCount = in.readVInt(); - List columns = new ArrayList<>(columnCount); - for (int c = 0; c < columnCount; c++) { - columns.add(readColumnInfo(in)); - } - this.columns = unmodifiableList(columns); - } else { - this.columns = null; - } - int rowCount = in.readVInt(); - List> rows = new ArrayList<>(rowCount); - if (rowCount > 0) { - int columnCount = in.readVInt(); - for (int r = 0; r < rowCount; r++) { - List row = new ArrayList<>(columnCount); - for (int c = 0; c < columnCount; c++) { - row.add(in.readGenericValue()); - } - rows.add(unmodifiableList(row)); - } - } - this.rows = unmodifiableList(rows); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(cursor); diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlTranslateAction.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlTranslateAction.java index b858c7642d163..f12a1e58b9076 100644 --- a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlTranslateAction.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlTranslateAction.java @@ -5,22 +5,17 @@ */ package org.elasticsearch.xpack.sql.action; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; /** * Sql action for translating SQL queries into ES requests */ -public class SqlTranslateAction extends StreamableResponseActionType { +public class SqlTranslateAction extends ActionType { public static final SqlTranslateAction INSTANCE = new SqlTranslateAction(); public static final String NAME = "indices:data/read/sql/translate"; private SqlTranslateAction() { - super(NAME); - } - - @Override - public SqlTranslateResponse newResponse() { - return new SqlTranslateResponse(); + super(NAME, SqlTranslateResponse::new); } } diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlTranslateResponse.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlTranslateResponse.java index e2efd4b46b674..8863788f5ee88 100644 --- a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlTranslateResponse.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlTranslateResponse.java @@ -21,7 +21,9 @@ public class SqlTranslateResponse extends ActionResponse implements ToXContentObject { private SearchSourceBuilder source; - public SqlTranslateResponse() { + public 
SqlTranslateResponse(StreamInput in) throws IOException { + super(in); + source = new SearchSourceBuilder(in); } public SqlTranslateResponse(SearchSourceBuilder source) { @@ -32,11 +34,6 @@ public SearchSourceBuilder source() { return source; } - @Override - public void readFrom(StreamInput in) throws IOException { - source = new SearchSourceBuilder(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { source.writeTo(out); diff --git a/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlClearCursorResponseTests.java b/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlClearCursorResponseTests.java index 9e9f200abac52..1b4a71e097776 100644 --- a/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlClearCursorResponseTests.java +++ b/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlClearCursorResponseTests.java @@ -5,10 +5,11 @@ */ package org.elasticsearch.xpack.sql.action; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.AbstractSerializingTestCase; -public class SqlClearCursorResponseTests extends AbstractStreamableXContentTestCase { +public class SqlClearCursorResponseTests extends AbstractSerializingTestCase { @Override protected SqlClearCursorResponse createTestInstance() { @@ -16,8 +17,8 @@ protected SqlClearCursorResponse createTestInstance() { } @Override - protected SqlClearCursorResponse createBlankInstance() { - return new SqlClearCursorResponse(); + protected Writeable.Reader instanceReader() { + return SqlClearCursorResponse::new; } @Override diff --git a/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlQueryResponseTests.java b/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlQueryResponseTests.java index 966e16e405731..7dd3f4aade637 100644 --- a/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlQueryResponseTests.java +++ b/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlQueryResponseTests.java @@ -7,11 +7,12 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.proto.ColumnInfo; import org.elasticsearch.xpack.sql.proto.Mode; @@ -25,10 +26,10 @@ import java.util.function.Supplier; import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS; -import static org.hamcrest.Matchers.hasSize; import static org.elasticsearch.xpack.sql.action.AbstractSqlQueryRequest.CURSOR; +import static org.hamcrest.Matchers.hasSize; -public class SqlQueryResponseTests extends AbstractStreamableXContentTestCase { +public class SqlQueryResponseTests extends AbstractSerializingTestCase { static String randomStringCursor() { return randomBoolean() ? 
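/*
 * With the response now built straight from a StreamInput, these tests drop
 * createBlankInstance()/readFrom in favor of instanceReader(). Under the hood
 * the wire check AbstractSerializingTestCase performs is essentially this
 * (a sketch, not the framework's exact code):
 *
 *   SqlQueryResponse original = createTestInstance();
 *   BytesStreamOutput out = new BytesStreamOutput();
 *   original.writeTo(out);
 *   SqlQueryResponse copy = instanceReader().read(out.bytes().streamInput());
 *   assertEquals(original, copy);
 */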
"" : randomAlphaOfLength(10); @@ -39,6 +40,11 @@ protected SqlQueryResponse createTestInstance() { return createRandomInstance(randomStringCursor(), randomFrom(Mode.values()), randomBoolean()); } + @Override + protected Writeable.Reader instanceReader() { + return SqlQueryResponse::new; + } + public static SqlQueryResponse createRandomInstance(String cursor, Mode mode, boolean columnar) { int columnCount = between(1, 10); @@ -78,11 +84,6 @@ public static SqlQueryResponse createRandomInstance(String cursor, Mode mode, bo return new SqlQueryResponse(cursor, mode, false, columns, rows); } - @Override - protected SqlQueryResponse createBlankInstance() { - return new SqlQueryResponse(); - } - public void testToXContent() throws IOException { SqlQueryResponse testInstance = createTestInstance(); diff --git a/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlTranslateResponseTests.java b/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlTranslateResponseTests.java index 76a04d03435e3..560db968a1177 100644 --- a/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlTranslateResponseTests.java +++ b/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlTranslateResponseTests.java @@ -5,13 +5,13 @@ */ package org.elasticsearch.xpack.sql.action; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.test.AbstractStreamableTestCase; -import org.elasticsearch.xpack.sql.action.SqlTranslateResponse; +import org.elasticsearch.test.AbstractWireSerializingTestCase; import java.io.IOException; -public class SqlTranslateResponseTests extends AbstractStreamableTestCase { +public class SqlTranslateResponseTests extends AbstractWireSerializingTestCase { @Override protected SqlTranslateResponse createTestInstance() { @@ -36,8 +36,8 @@ protected SqlTranslateResponse createTestInstance() { } @Override - protected SqlTranslateResponse createBlankInstance() { - return new SqlTranslateResponse(); + protected Writeable.Reader instanceReader() { + return SqlTranslateResponse::new; } @Override diff --git a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/JreHttpUrlConnection.java b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/JreHttpUrlConnection.java index 59a6e82e9874d..716b1bb058a53 100644 --- a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/JreHttpUrlConnection.java +++ b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/JreHttpUrlConnection.java @@ -48,8 +48,8 @@ public class JreHttpUrlConnection implements Closeable { * error. 
*/ public static final String SQL_STATE_BAD_SERVER = "bad_server"; - private static final String SQL_NOT_AVAILABLE_ERROR_MESSAGE = "request [" + SQL_QUERY_REST_ENDPOINT - + "] contains unrecognized parameter: [mode]"; + private static final String SQL_NOT_AVAILABLE_ERROR_MESSAGE = "Incorrect HTTP method for uri [" + SQL_QUERY_REST_ENDPOINT + + "?error_trace] and method [POST], allowed:"; public static R http(String path, String query, ConnectionConfiguration cfg, Function handler) { final URI uriPath = cfg.baseUri().resolve(path); // update path if needed @@ -181,9 +181,8 @@ private ResponseOrException parserError() throws IOException { if (type == null) { // check if x-pack or sql are not available (x-pack not installed or sql not enabled) // by checking the error message the server is sending back - if (con.getResponseCode() >= HttpURLConnection.HTTP_BAD_REQUEST - && failure.reason().contains(SQL_NOT_AVAILABLE_ERROR_MESSAGE)) { - return new ResponseOrException<>(new SQLException("X-Pack/SQL do not seem to be available" + if (con.getResponseCode() >= HttpURLConnection.HTTP_BAD_REQUEST && failure.reason().contains(SQL_NOT_AVAILABLE_ERROR_MESSAGE)) { + return new ResponseOrException<>(new SQLException("X-Pack/SQL does not seem to be available" + " on the Elasticsearch node using the access path '" + con.getURL().getHost() + (con.getURL().getPort() > 0 ? ":" + con.getURL().getPort() : "") diff --git a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/RemoteFailure.java b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/RemoteFailure.java index 61e62c390ec11..aca3003d66888 100644 --- a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/RemoteFailure.java +++ b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/RemoteFailure.java @@ -155,10 +155,15 @@ private static RemoteFailure parseResponseTopLevel(JsonParser parser) throws IOE } else { switch (fieldName) { case "error": - if (token != JsonToken.START_OBJECT) { - throw new IOException("Expected [error] to be an object but was [" + token + "][" + parser.getText() + "]"); + if (token != JsonToken.START_OBJECT && token != JsonToken.VALUE_STRING) { + throw new IOException("Expected [error] to be an object or string but was [" + token + "][" + + parser.getText() + "]"); + } + if (token == JsonToken.VALUE_STRING) { + exception = new RemoteFailure(StringUtils.EMPTY, parser.getText(), null, null, null, null); + } else { + exception = parseFailure(parser); } - exception = parseFailure(parser); continue; case "status": if (token != JsonToken.VALUE_NUMBER_INT) { diff --git a/x-pack/plugin/sql/sql-client/src/test/java/org/elasticsearch/xpack/sql/client/RemoteFailureTests.java b/x-pack/plugin/sql/sql-client/src/test/java/org/elasticsearch/xpack/sql/client/RemoteFailureTests.java index ee3a859b548da..2029493bcbec4 100644 --- a/x-pack/plugin/sql/sql-client/src/test/java/org/elasticsearch/xpack/sql/client/RemoteFailureTests.java +++ b/x-pack/plugin/sql/sql-client/src/test/java/org/elasticsearch/xpack/sql/client/RemoteFailureTests.java @@ -70,9 +70,9 @@ public void testNoError() { public void testBogusError() { IOException e = expectThrows(IOException.class, () -> parse("bogus_error.json")); assertEquals( - "Can't parse error from Elasticsearch [Expected [error] to be an object but was [VALUE_STRING][bogus]] " + "Can't parse error from Elasticsearch [Expected [error] to be an object or string but was [START_ARRAY][[]] " + "at [line 1 col 12]. 
Response:\n" - + "{ \"error\": \"bogus\" }", + + "{ \"error\": [\"bogus\"] }", e.getMessage()); } diff --git a/x-pack/plugin/sql/sql-client/src/test/resources/remote_failure/bogus_error.json b/x-pack/plugin/sql/sql-client/src/test/resources/remote_failure/bogus_error.json index f79361cec1c95..49c31ca6f54bb 100644 --- a/x-pack/plugin/sql/sql-client/src/test/resources/remote_failure/bogus_error.json +++ b/x-pack/plugin/sql/sql-client/src/test/resources/remote_failure/bogus_error.json @@ -1 +1 @@ -{ "error": "bogus" } \ No newline at end of file +{ "error": ["bogus"] } \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java index 2fb5028e987e8..65b59a6f2ce89 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java @@ -497,7 +497,7 @@ private static List buildIndices(String[] indexNames, String javaRegex, if (unmappedIndices.isEmpty() == true) { concreteIndices = asList(capIndices); } else { - concreteIndices = new ArrayList<>(capIndices.length - unmappedIndices.size() + 1); + concreteIndices = new ArrayList<>(capIndices.length); for (String capIndex : capIndices) { // add only indices that have a mapping if (unmappedIndices.contains(capIndex) == false) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoShape.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoShape.java index 1c3d1e7c9358b..ad0bbdd7c94c8 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoShape.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoShape.java @@ -29,7 +29,7 @@ import org.elasticsearch.geo.geometry.Point; import org.elasticsearch.geo.geometry.Polygon; import org.elasticsearch.geo.geometry.Rectangle; -import org.elasticsearch.geo.utils.GeographyValidator; +import org.elasticsearch.geo.utils.StandardValidator; import org.elasticsearch.geo.utils.GeometryValidator; import org.elasticsearch.geo.utils.WellKnownText; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; @@ -51,7 +51,7 @@ public class GeoShape implements ToXContentFragment, NamedWriteable { private final Geometry shape; - private static final GeometryValidator validator = new GeographyValidator(true); + private static final GeometryValidator validator = new StandardValidator(true); private static final GeometryParser GEOMETRY_PARSER = new GeometryParser(true, true, true); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/IdentifierBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/IdentifierBuilder.java index 37adb44a95557..e391850dd1776 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/IdentifierBuilder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/IdentifierBuilder.java @@ -25,12 +25,12 @@ public TableIdentifier visitTableIdentifier(TableIdentifierContext ctx) { ParseTree tree = ctx.name != null ? 
ctx.name : ctx.TABLE_IDENTIFIER(); String index = tree.getText(); - return new TableIdentifier(source, visitIdentifier(ctx.catalog), index); + return new TableIdentifier(source, visitIdentifier(ctx.catalog), unquoteIdentifier(index)); } @Override public String visitIdentifier(IdentifierContext ctx) { - return ctx == null ? null : ctx.getText(); + return ctx == null ? null : unquoteIdentifier(ctx.getText()); } @Override @@ -41,4 +41,8 @@ public String visitQualifiedName(QualifiedNameContext ctx) { return Strings.collectionToDelimitedString(visitList(ctx.identifier(), String.class), "."); } + + private static String unquoteIdentifier(String identifier) { + return identifier.replace("\"\"", "\""); + } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java index 771ab30d0f4c1..bae5a85948460 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java @@ -95,7 +95,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli return channel -> client.execute(SqlQueryAction.INSTANCE, sqlRequest, new RestResponseListener(channel) { @Override public RestResponse buildResponse(SqlQueryResponse response) throws Exception { - XContentBuilder builder = XContentBuilder.builder(xContentType.xContent()); + XContentBuilder builder = channel.newBuilder(request.getXContentType(), xContentType, true); response.toXContent(builder, request); return new BytesRestResponse(RestStatus.OK, builder); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlStatsAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlStatsAction.java index a955b442bf970..8b3b61bc82454 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlStatsAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlStatsAction.java @@ -6,20 +6,14 @@ package org.elasticsearch.xpack.sql.plugin; -import org.elasticsearch.action.StreamableResponseActionType; +import org.elasticsearch.action.ActionType; -public class SqlStatsAction extends StreamableResponseActionType { +public class SqlStatsAction extends ActionType { public static final SqlStatsAction INSTANCE = new SqlStatsAction(); public static final String NAME = "cluster:monitor/xpack/sql/stats/dist"; private SqlStatsAction() { - super(NAME); + super(NAME, SqlStatsResponse::new); } - - @Override - public SqlStatsResponse newResponse() { - return new SqlStatsResponse(); - } - } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlStatsRequest.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlStatsRequest.java index ed0570bf5c16d..bdc01f11663a4 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlStatsRequest.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlStatsRequest.java @@ -19,8 +19,14 @@ public class SqlStatsRequest extends BaseNodesRequest { private boolean includeStats; - + public SqlStatsRequest() { + super((String[]) null); + } + + public SqlStatsRequest(StreamInput in) throws IOException { + super(in); + includeStats = in.readBoolean(); } public boolean includeStats() { @@ -30,12 +36,6 @@ public boolean includeStats() { public void includeStats(boolean 
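/*
 * The RestSqlQueryAction hunk above is the server-side half of the pretty
 * printing tests added earlier in this change: XContentBuilder.builder(...)
 * creates a bare builder that never sees request parameters, while the
 * channel-aware builder inherits "?pretty" (and other response options) from
 * the incoming RestRequest:
 *
 *   // before: pretty flag silently ignored
 *   XContentBuilder builder = XContentBuilder.builder(xContentType.xContent());
 *   // after: builder honors the request's response options
 *   XContentBuilder builder = channel.newBuilder(request.getXContentType(), xContentType, true);
 */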
includeStats) { this.includeStats = includeStats; } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - includeStats = in.readBoolean(); - } @Override public void writeTo(StreamOutput out) throws IOException { @@ -51,7 +51,10 @@ public String toString() { static class NodeStatsRequest extends BaseNodeRequest { boolean includeStats; - NodeStatsRequest() {} + NodeStatsRequest(StreamInput in) throws IOException { + super(in); + includeStats = in.readBoolean(); + } NodeStatsRequest(SqlStatsRequest request) { includeStats = request.includeStats(); @@ -61,12 +64,6 @@ public boolean includeStats() { return includeStats; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - includeStats = in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlStatsResponse.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlStatsResponse.java index 7e584fddc1957..603b0422829ea 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlStatsResponse.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlStatsResponse.java @@ -22,7 +22,8 @@ public class SqlStatsResponse extends BaseNodesResponse implements ToXContentObject { - public SqlStatsResponse() { + public SqlStatsResponse(StreamInput in) throws IOException { + super(in); } public SqlStatsResponse(ClusterName clusterName, List nodes, List failures) { @@ -36,7 +37,7 @@ protected List readNodesFrom(StreamInput in) throws IOExcepti @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeStreamableList(nodes); + out.writeList(nodes); } @Override @@ -54,7 +55,11 @@ public static class NodeStatsResponse extends BaseNodeResponse implements ToXCon private Counters stats; - public NodeStatsResponse() { + public NodeStatsResponse(StreamInput in) throws IOException { + super(in); + if (in.readBoolean()) { + stats = new Counters(in); + } } public NodeStatsResponse(DiscoveryNode node) { @@ -68,14 +73,6 @@ public Counters getStats() { public void setStats(Counters stats) { this.stats = stats; } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - if (in.readBoolean()) { - stats = Counters.read(in); - } - } @Override public void writeTo(StreamOutput out) throws IOException { @@ -97,9 +94,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } static SqlStatsResponse.NodeStatsResponse readNodeResponse(StreamInput in) throws IOException { - SqlStatsResponse.NodeStatsResponse node = new SqlStatsResponse.NodeStatsResponse(); - node.readFrom(in); - return node; + return new SqlStatsResponse.NodeStatsResponse(in); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlClearCursorAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlClearCursorAction.java index 98bb25b8ebd81..6d3802c341f1d 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlClearCursorAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlClearCursorAction.java @@ -9,7 +9,6 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import 
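/*
 * Same migration as the responses: SqlClearCursorRequest now has a StreamInput
 * constructor, so registration can hand HandledTransportAction the constructor
 * reference without the raw Writeable.Reader cast:
 *
 *   // before
 *   super(NAME, transportService, actionFilters,
 *       (Writeable.Reader<SqlClearCursorRequest>) SqlClearCursorRequest::new);
 *   // after
 *   super(NAME, transportService, actionFilters, SqlClearCursorRequest::new);
 */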
org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.sql.action.SqlClearCursorRequest; @@ -31,7 +30,7 @@ public class TransportSqlClearCursorAction extends HandledTransportAction) SqlClearCursorRequest::new); + super(NAME, transportService, actionFilters, SqlClearCursorRequest::new); this.planExecutor = planExecutor; this.sqlLicenseChecker = sqlLicenseChecker; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java index 97fc583c3e886..3e9c30f49b453 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java @@ -12,7 +12,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -49,7 +48,7 @@ public class TransportSqlQueryAction extends HandledTransportAction) SqlQueryRequest::new); + super(SqlQueryAction.NAME, transportService, actionFilters, SqlQueryRequest::new); this.securityContext = XPackSettings.SECURITY_ENABLED.get(settings) ? new SecurityContext(settings, threadPool.getThreadContext()) : null; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlStatsAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlStatsAction.java index acd223d03fa05..9340b2a83947c 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlStatsAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlStatsAction.java @@ -10,11 +10,13 @@ import org.elasticsearch.action.support.nodes.TransportNodesAction; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.sql.execution.PlanExecutor; +import java.io.IOException; import java.util.List; /** @@ -47,8 +49,8 @@ protected SqlStatsRequest.NodeStatsRequest newNodeRequest(SqlStatsRequest reques } @Override - protected SqlStatsResponse.NodeStatsResponse newNodeResponse() { - return new SqlStatsResponse.NodeStatsResponse(); + protected SqlStatsResponse.NodeStatsResponse newNodeResponse(StreamInput in) throws IOException { + return new SqlStatsResponse.NodeStatsResponse(in); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlTranslateAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlTranslateAction.java index 9101557d1cd1a..b37c25fcab933 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlTranslateAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlTranslateAction.java @@ -10,7 +10,6 @@ import 
org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -40,7 +39,7 @@ public class TransportSqlTranslateAction extends HandledTransportAction) SqlTranslateRequest::new); + super(SqlTranslateAction.NAME, transportService, actionFilters, SqlTranslateRequest::new); this.securityContext = XPackSettings.SECURITY_ENABLED.get(settings) ? new SecurityContext(settings, threadPool.getThreadContext()) : null; diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolverTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolverTests.java index 561347b89979c..fbd004a71dfb4 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolverTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolverTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.xpack.sql.type.DataType; import org.elasticsearch.xpack.sql.type.EsField; import org.elasticsearch.xpack.sql.type.InvalidMappedField; +import org.elasticsearch.xpack.sql.type.KeywordEsField; import org.elasticsearch.xpack.sql.type.TypesTests; import java.util.ArrayList; @@ -164,8 +165,6 @@ public void testMergeIncompatibleCapabilitiesOfObjectFields() throws Exception { ((InvalidMappedField) esField).errorMessage()); } - - public void testSeparateSameMappingDifferentIndices() throws Exception { Map oneMapping = TypesTests.loadMapping("mapping-basic.json", true); Map sameMapping = TypesTests.loadMapping("mapping-basic.json", true); @@ -192,6 +191,26 @@ public void testSeparateIncompatibleTypes() throws Exception { assertEqualsMaps(incompatible, indices.get(1).mapping()); } + // covers the scenario described in https://github.com/elastic/elasticsearch/issues/43876 + public void testMultipleCompatibleIndicesWithDifferentFields() { + int indicesCount = randomIntBetween(2, 15); + EsIndex[] expectedIndices = new EsIndex[indicesCount]; + + // each index will have one field with different name than all others + for (int i = 0; i < indicesCount; i++) { + Map mapping = new HashMap<>(1); + String fieldName = "field" + (i + 1); + mapping.put(fieldName, new KeywordEsField(fieldName)); + expectedIndices[i] = new EsIndex("index" + (i + 1), mapping); + } + + List actualIndices = separate(expectedIndices); + assertEquals(indicesCount, actualIndices.size()); + for (int i = 0; i < indicesCount; i++) { + assertEqualsMaps(expectedIndices[i].mapping(), actualIndices.get(i).mapping()); + } + } + public static IndexResolution merge(EsIndex... 
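/*
 * Background for the regression test above and the IndexResolver change earlier
 * in this diff: ArrayList rejects negative capacities, and the old size hint
 * capIndices.length - unmappedIndices.size() + 1 goes negative as soon as more
 * indices are unmapped for a field than have it mapped, which is presumably the
 * failure behind issue #43876 (each index mapping one field the others lack):
 *
 *   new ArrayList<>(-12);                // IllegalArgumentException
 *   new ArrayList<>(capIndices.length);  // safe upper bound: at most every capIndex is kept
 */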
indices) { return IndexResolver.mergedMappings("*", Stream.of(indices).map(EsIndex::name).toArray(String[]::new), fromMappings(indices)); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/SqlParserTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/SqlParserTests.java index f9b0fc18bca52..ca31e32b2edc3 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/SqlParserTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/SqlParserTests.java @@ -6,7 +6,10 @@ package org.elasticsearch.xpack.sql.parser; import com.google.common.base.Joiner; + import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.expression.Alias; +import org.elasticsearch.xpack.sql.expression.Literal; import org.elasticsearch.xpack.sql.expression.NamedExpression; import org.elasticsearch.xpack.sql.expression.Order; import org.elasticsearch.xpack.sql.expression.UnresolvedAttribute; @@ -21,6 +24,7 @@ import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.sql.plan.logical.OrderBy; import org.elasticsearch.xpack.sql.plan.logical.Project; +import org.elasticsearch.xpack.sql.plan.logical.UnresolvedRelation; import java.util.ArrayList; import java.util.List; @@ -46,6 +50,24 @@ private T singleProjection(Project project, Class type) { return type.cast(p); } + public void testEscapeDoubleQuotes() { + Project project = project(parseStatement("SELECT bar FROM \"fo\"\"o\"")); + assertTrue(project.child() instanceof UnresolvedRelation); + assertEquals("fo\"o", ((UnresolvedRelation) project.child()).table().index()); + } + + public void testEscapeSingleQuotes() { + Alias a = singleProjection(project(parseStatement("SELECT '''ab''c' AS \"escaped_text\"")), Alias.class); + assertEquals("'ab'c", ((Literal) a.child()).value()); + assertEquals("escaped_text", a.name()); + } + + public void testEscapeSingleAndDoubleQuotes() { + Alias a = singleProjection(project(parseStatement("SELECT 'ab''c' AS \"escaped\"\"text\"")), Alias.class); + assertEquals("ab'c", ((Literal) a.child()).value()); + assertEquals("escaped\"text", a.name()); + } + public void testSelectField() { UnresolvedAttribute a = singleProjection(project(parseStatement("SELECT bar FROM foo")), UnresolvedAttribute.class); assertEquals("bar", a.name()); diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.delete_data_frame_transform.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.delete_data_frame_transform.json index 30da72491df11..accb791320b91 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.delete_data_frame_transform.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.delete_data_frame_transform.json @@ -13,6 +13,13 @@ "required": true, "description": "The id of the transform to delete" } + }, + "params": { + "force": { + "type": "boolean", + "required": false, + "description": "When `true`, the transform is deleted regardless of its current state. The default value is `false`, meaning that the transform must be `stopped` before it can be deleted." 
+ } } }, "body": null diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.put_data_frame_transform.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.put_data_frame_transform.json index 919682676da3c..69f740c059b95 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.put_data_frame_transform.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.put_data_frame_transform.json @@ -11,6 +11,13 @@ "required": true, "description": "The id of the new transform." } + }, + "params": { + "defer_validation": { + "type": "boolean", + "required": false, + "description": "If validations should be deferred until data frame transform starts, defaults to false." + } } }, "body": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/slm.delete_lifecycle.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/slm.delete_lifecycle.json new file mode 100644 index 0000000000000..f586351bbf59d --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/slm.delete_lifecycle.json @@ -0,0 +1,20 @@ +{ + "slm.delete_lifecycle": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api.html", + "stability": "stable", + "methods": [ "DELETE" ], + "url": { + "path": "/_slm/policy/{policy_id}", + "paths": ["/_slm/policy/{policy_id}"], + "parts": { + "policy": { + "type" : "string", + "description" : "The id of the snapshot lifecycle policy to remove" + } + }, + "params": { + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/slm.execute_lifecycle.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/slm.execute_lifecycle.json new file mode 100644 index 0000000000000..82b6d9b52d856 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/slm.execute_lifecycle.json @@ -0,0 +1,20 @@ +{ + "slm.execute_lifecycle": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api.html", + "stability": "stable", + "methods": [ "PUT" ], + "url": { + "path": "/_slm/policy/{policy_id}/_execute", + "paths": ["/_slm/policy/{policy_id}/_execute"], + "parts": { + "policy_id": { + "type" : "string", + "description" : "The id of the snapshot lifecycle policy to be executed" + } + }, + "params": { + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/slm.get_lifecycle.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/slm.get_lifecycle.json new file mode 100644 index 0000000000000..2f588aff23527 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/slm.get_lifecycle.json @@ -0,0 +1,20 @@ +{ + "slm.get_lifecycle": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api.html", + "stability": "stable", + "methods": [ "GET" ], + "url": { + "path": "/_slm/policy/{policy_id}", + "paths": ["/_slm/policy/{policy_id}", "/_slm/policy"], + "parts": { + "policy_id": { + "type" : "string", + "description" : "Comma-separated list of snapshot lifecycle policies to retrieve" + } + }, + "params": { + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/slm.put_lifecycle.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/slm.put_lifecycle.json new file mode 100644 index 0000000000000..3f88ae4a836b9 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/slm.put_lifecycle.json @@ -0,0 +1,22 @@ +{ + "slm.put_lifecycle": { + "documentation": 
"https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api.html", + "stability": "stable", + "methods": [ "PUT" ], + "url": { + "path": "/_slm/policy/{policy_id}", + "paths": ["/_slm/policy/{policy_id}"], + "parts": { + "policy_id": { + "type" : "string", + "description" : "The id of the snapshot lifecycle policy" + } + }, + "params": { + } + }, + "body": { + "description": "The snapshot lifecycle policy definition to register" + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml index bfde8128b491c..eac2490543605 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml @@ -42,6 +42,40 @@ setup: data_frame.delete_data_frame_transform: transform_id: "missing transform" +--- +"Test put transform with frequency too low": + - do: + catch: /minimum permitted \[frequency\] is \[1s\]/ + data_frame.put_data_frame_transform: + transform_id: "frequency-too-low" + body: > + { + "source": { "index": "airline-data" }, + "dest": { "index": "airline-dest" }, + "frequency": "999ms", + "pivot": { + "group_by": { "airline": {"terms": {"field": "airline"}}}, + "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} + } + } + +--- +"Test put transform with frequency too high": + - do: + catch: /highest permitted \[frequency\] is \[1h\]/ + data_frame.put_data_frame_transform: + transform_id: "frequency-too-low" + body: > + { + "source": { "index": "airline-data" }, + "dest": { "index": "airline-dest" }, + "frequency": "3600001ms", + "pivot": { + "group_by": { "airline": {"terms": {"field": "airline"}}}, + "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} + } + } + --- "Test put transform with invalid source index": - do: @@ -57,6 +91,20 @@ setup: "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} } } + - do: + data_frame.put_data_frame_transform: + transform_id: "missing-source-transform" + defer_validation: true + body: > + { + "source": { "index": "missing-index" }, + "dest": { "index": "missing-source-dest" }, + "pivot": { + "group_by": { "airline": {"terms": {"field": "airline"}}}, + "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} + } + } + - match: { acknowledged: true } --- "Test basic transform crud": - do: @@ -225,6 +273,39 @@ setup: - match: { transforms.0.sync.time.field: "time" } - match: { transforms.0.sync.time.delay: "90m" } --- +"Test PUT continuous transform without delay set": + - do: + data_frame.put_data_frame_transform: + transform_id: "airline-transform-continuous" + body: > + { + "source": { + "index": "airline-data" + }, + "dest": { "index": "airline-data-by-airline-continuous" }, + "pivot": { + "group_by": { "airline": {"terms": {"field": "airline"}}}, + "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} + }, + "sync": { + "time": { + "field": "time" + } + } + } + - match: { acknowledged: true } + - do: + data_frame.get_data_frame_transform: + transform_id: "airline-transform-continuous" + - match: { count: 1 } + - match: { transforms.0.id: "airline-transform-continuous" } + - match: { transforms.0.source.index.0: "airline-data" } + - match: { transforms.0.dest.index: "airline-data-by-airline-continuous" } + - match: { transforms.0.pivot.group_by.airline.terms.field: "airline" } + - match: { transforms.0.pivot.aggregations.avg_response.avg.field: "responsetime" } 
+ - match: { transforms.0.sync.time.field: "time" } + - match: { transforms.0.sync.time.delay: "60s" } +--- "Test transform with invalid page parameter": - do: catch: /Param \[size\] has a max acceptable value of \[1000\]/ @@ -249,6 +330,22 @@ setup: "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} } } + + - do: + data_frame.put_data_frame_transform: + transform_id: "airline-transform" + defer_validation: true + body: > + { + "source": { + "index": ["airline-data*"] + }, + "dest": { "index": "airline-data-by-airline" }, + "pivot": { + "group_by": { "airline": {"terms": {"field": "airline"}}}, + "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} + } + } --- "Test alias scenarios": - do: @@ -491,3 +588,47 @@ setup: "description": "yaml test transform on airline-data", "version": "7.3.0" } +--- +"Test force deleting a running transform": + - do: + data_frame.put_data_frame_transform: + transform_id: "airline-transform-start-delete" + body: > + { + "source": { "index": "airline-data" }, + "dest": { "index": "airline-data-by-airline-start-delete" }, + "pivot": { + "group_by": { "airline": {"terms": {"field": "airline"}}}, + "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} + }, + "sync": { + "time": { + "field": "time", + "delay": "90m" + } + } + } + - match: { acknowledged: true } + - do: + data_frame.start_data_frame_transform: + transform_id: "airline-transform-start-delete" + - match: { acknowledged: true } + + - do: + data_frame.get_data_frame_transform_stats: + transform_id: "airline-transform-start-delete" + - match: { count: 1 } + - match: { transforms.0.id: "airline-transform-start-delete" } + - match: { transforms.0.state.indexer_state: "/started|indexing/" } + - match: { transforms.0.state.task_state: "started" } + + - do: + catch: /Cannot delete data frame \[airline-transform-start-delete\] as the task is running/ + data_frame.delete_data_frame_transform: + transform_id: "airline-transform-start-delete" + + - do: + data_frame.delete_data_frame_transform: + transform_id: "airline-transform-start-delete" + force: true + - match: { acknowledged: true } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml index e4ff3c813ce0f..9bea5b9bb3ad7 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml @@ -141,6 +141,12 @@ teardown: "pivot": { "group_by": { "airline": {"terms": {"field": "airline"}}}, "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} + }, + "sync": { + "time": { + "field": "time", + "delay": "90m" + } } } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/indices.freeze/20_stats.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/indices.freeze/20_stats.yml index fd6163761dcb9..1962fa232c8e9 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/indices.freeze/20_stats.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/indices.freeze/20_stats.yml @@ -51,7 +51,7 @@ setup: # unfreeze index - do: - indices.freeze: + indices.unfreeze: index: test - is_true: acknowledged diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/indices.freeze/30_usage.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/indices.freeze/30_usage.yml new file mode 100644 index 0000000000000..9135c19f6794a --- /dev/null +++ 
b/x-pack/plugin/src/test/resources/rest-api-spec/test/indices.freeze/30_usage.yml @@ -0,0 +1,60 @@ +--- +setup: + - do: + indices.create: + index: test + - do: + cluster.health: + wait_for_no_initializing_shards: true + +--- +"Usage stats on frozen indices": + - skip: + version: " - 7.9.99" + reason: "frozen indices have usage stats starting in version 8.0.0" + + - do: + index: + index: test + id: 1 + body: { "foo": "bar" } + + - do: + index: + index: test + id: 2 + body: { "foo": "bar" } + + - do: + index: + index: test + id: 3 + body: { "foo": "bar" } + + - do: {xpack.usage: {}} + - match: { frozen_indices.available: true } + - match: { frozen_indices.enabled: true } + - match: { frozen_indices.indices_count: 0 } + + # freeze index + - do: + indices.freeze: + index: test + - is_true: acknowledged + + + - do: {xpack.usage: {}} + - match: { frozen_indices.available: true } + - match: { frozen_indices.enabled: true } + - match: { frozen_indices.indices_count: 1 } + + # unfreeze index + - do: + indices.unfreeze: + index: test + - is_true: acknowledged + + - do: {xpack.usage: {}} + - match: { frozen_indices.available: true } + - match: { frozen_indices.enabled: true } + - match: { frozen_indices.indices_count: 0 } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/evaluate_data_frame.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/evaluate_data_frame.yml index ef844d61f1626..46d903977eb21 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/evaluate_data_frame.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/evaluate_data_frame.yml @@ -8,6 +8,8 @@ setup: "is_outlier": false, "is_outlier_int": 0, "outlier_score": 0.0, + "regression_field_act": 10.9, + "regression_field_pred": 10.9, "all_true_field": true, "all_false_field": false } @@ -20,6 +22,8 @@ setup: "is_outlier": false, "is_outlier_int": 0, "outlier_score": 0.2, + "regression_field_act": 12.0, + "regression_field_pred": 9.9, "all_true_field": true, "all_false_field": false } @@ -32,6 +36,8 @@ setup: "is_outlier": false, "is_outlier_int": 0, "outlier_score": 0.3, + "regression_field_act": 20.9, + "regression_field_pred": 5.9, "all_true_field": true, "all_false_field": false } @@ -44,6 +50,8 @@ setup: "is_outlier": true, "is_outlier_int": 1, "outlier_score": 0.3, + "regression_field_act": 11.9, + "regression_field_pred": 11.9, "all_true_field": true, "all_false_field": false } @@ -56,6 +64,8 @@ setup: "is_outlier": true, "is_outlier_int": 1, "outlier_score": 0.4, + "regression_field_act": 42.9, + "regression_field_pred": 42.9, "all_true_field": true, "all_false_field": false } @@ -68,6 +78,8 @@ setup: "is_outlier": true, "is_outlier_int": 1, "outlier_score": 0.5, + "regression_field_act": 0.42, + "regression_field_pred": 0.42, "all_true_field": true, "all_false_field": false } @@ -80,6 +92,8 @@ setup: "is_outlier": true, "is_outlier_int": 1, "outlier_score": 0.9, + "regression_field_act": 1.1235813, + "regression_field_pred": 1.12358, "all_true_field": true, "all_false_field": false } @@ -92,6 +106,8 @@ setup: "is_outlier": true, "is_outlier_int": 1, "outlier_score": 0.95, + "regression_field_act": -5.20, + "regression_field_pred": -5.1, "all_true_field": true, "all_false_field": false } @@ -109,7 +125,7 @@ setup: indices.refresh: {} --- -"Test binary_soft_classifition auc_roc": +"Test binary_soft_classification auc_roc": - do: ml.evaluate_data_frame: body: > @@ -129,7 +145,7 @@ setup: - is_false: binary_soft_classification.auc_roc.curve --- -"Test binary_soft_classifition 
auc_roc given actual_field is int": +"Test binary_soft_classification auc_roc given actual_field is int": - do: ml.evaluate_data_frame: body: > @@ -149,7 +165,7 @@ setup: - is_false: binary_soft_classification.auc_roc.curve --- -"Test binary_soft_classifition auc_roc include curve": +"Test binary_soft_classification auc_roc include curve": - do: ml.evaluate_data_frame: body: > @@ -169,7 +185,7 @@ setup: - is_true: binary_soft_classification.auc_roc.curve --- -"Test binary_soft_classifition auc_roc given actual_field is always true": +"Test binary_soft_classification auc_roc given actual_field is always true": - do: catch: /\[auc_roc\] requires at least one actual_field to have a different value than \[true\]/ ml.evaluate_data_frame: @@ -188,7 +204,7 @@ setup: } --- -"Test binary_soft_classifition auc_roc given actual_field is always false": +"Test binary_soft_classification auc_roc given actual_field is always false": - do: catch: /\[auc_roc\] requires at least one actual_field to have the value \[true\]/ ml.evaluate_data_frame: @@ -207,7 +223,7 @@ setup: } --- -"Test binary_soft_classifition precision": +"Test binary_soft_classification precision": - do: ml.evaluate_data_frame: body: > @@ -230,7 +246,7 @@ setup: '0.5': 1.0 --- -"Test binary_soft_classifition recall": +"Test binary_soft_classification recall": - do: ml.evaluate_data_frame: body: > @@ -254,7 +270,7 @@ setup: '0.5': 0.6 --- -"Test binary_soft_classifition confusion_matrix": +"Test binary_soft_classification confusion_matrix": - do: ml.evaluate_data_frame: body: > @@ -290,7 +306,7 @@ setup: fn: 2 --- -"Test binary_soft_classifition default metrics": +"Test binary_soft_classification default metrics": - do: ml.evaluate_data_frame: body: > @@ -356,7 +372,7 @@ setup: } --- -"Test binary_soft_classification given evaluation with emtpy metrics": +"Test binary_soft_classification given evaluation with empty metrics": - do: catch: /\[binary_soft_classification\] must have one or more metrics/ ml.evaluate_data_frame: @@ -518,3 +534,71 @@ setup: } } } +--- +"Test regression given evaluation with empty metrics": + - do: + catch: /\[regression\] must have one or more metrics/ + ml.evaluate_data_frame: + body: > + { + "index": "utopia", + "evaluation": { + "regression": { + "actual_field": "regression_field_act", + "predicted_field": "regression_field_pred", + "metrics": { } + } + } + } +--- +"Test regression mean_squared_error": + - do: + ml.evaluate_data_frame: + body: > + { + "index": "utopia", + "evaluation": { + "regression": { + "actual_field": "regression_field_act", + "predicted_field": "regression_field_pred", + "metrics": { "mean_squared_error": {} } + } + } + } + + - match: { regression.mean_squared_error.error: 28.67749840974834 } + - is_false: regression.r_squared.value +--- +"Test regression r_squared": + - do: + ml.evaluate_data_frame: + body: > + { + "index": "utopia", + "evaluation": { + "regression": { + "actual_field": "regression_field_act", + "predicted_field": "regression_field_pred", + "metrics": { "r_squared": {} } + } + } + } + - match: { regression.r_squared.value: 0.8551031778603486 } + - is_false: regression.mean_squared_error +--- +"Test regression with null metrics": + - do: + ml.evaluate_data_frame: + body: > + { + "index": "utopia", + "evaluation": { + "regression": { + "actual_field": "regression_field_act", + "predicted_field": "regression_field_pred" + } + } + } + + - match: { regression.mean_squared_error.error: 28.67749840974834 } + - match: { regression.r_squared.value: 0.8551031778603486 } diff 
--git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_datafeed_stats.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_datafeed_stats.yml index 0496de1db477b..37e4245049756 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_datafeed_stats.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_datafeed_stats.yml @@ -175,19 +175,23 @@ setup: ml.start_datafeed: datafeed_id: "datafeed-1" start: 0 + - match: { started: true} - do: ml.get_datafeed_stats: datafeed_id: datafeed-1 - - match: { datafeeds.0.datafeed_id: "datafeed-1"} - - match: { datafeeds.0.state: "started"} - - match: { datafeeds.0.timing_stats.job_id: "get-datafeed-stats-1"} - - match: { datafeeds.0.timing_stats.search_count: 0} - - match: { datafeeds.0.timing_stats.total_search_time_ms: 0.0} + - match: { datafeeds.0.datafeed_id: "datafeed-1"} + - match: { datafeeds.0.state: "started"} + - match: { datafeeds.0.timing_stats.job_id: "get-datafeed-stats-1"} + - match: { datafeeds.0.timing_stats.search_count: 0} + - match: { datafeeds.0.timing_stats.bucket_count: 0} + - match: { datafeeds.0.timing_stats.total_search_time_ms: 0.0} + - is_false: datafeeds.0.timing_stats.average_search_time_per_bucket_ms - do: ml.stop_datafeed: datafeed_id: "datafeed-1" + - match: { stopped: true} - do: ml.get_datafeed_stats: @@ -195,7 +199,9 @@ setup: - match: { datafeeds.0.datafeed_id: "datafeed-1"} - match: { datafeeds.0.state: "stopped"} - match: { datafeeds.0.timing_stats.job_id: "get-datafeed-stats-1"} - - match: { datafeeds.0.timing_stats.search_count: 1} + # We don't really know at this point if datafeed managed to perform at least one search, hence the very relaxed assertion + - gte: { datafeeds.0.timing_stats.search_count: 0} + - gte: { datafeeds.0.timing_stats.bucket_count: 0} - gte: { datafeeds.0.timing_stats.total_search_time_ms: 0.0} --- diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_stats.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_stats.yml index f52a2c21f773c..5638f7b2e0dac 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_stats.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_stats.yml @@ -101,6 +101,7 @@ setup: - is_true: jobs.0.open_time - match: { jobs.0.timing_stats.job_id: job-stats-test } - match: { jobs.0.timing_stats.bucket_count: 1 } # Records are 1h apart and bucket span is 1h so 1 bucket is produced + - gte: { jobs.0.timing_stats.total_bucket_processing_time_ms: 0.0 } - gte: { jobs.0.timing_stats.minimum_bucket_processing_time_ms: 0.0 } - gte: { jobs.0.timing_stats.maximum_bucket_processing_time_ms: 0.0 } - gte: { jobs.0.timing_stats.average_bucket_processing_time_ms: 0.0 } @@ -140,6 +141,7 @@ setup: - is_false: jobs.0.open_time - match: { jobs.0.timing_stats.job_id: job-stats-test } - match: { jobs.0.timing_stats.bucket_count: 1 } # Records are 1h apart and bucket span is 1h so 1 bucket is produced + - gte: { jobs.0.timing_stats.total_bucket_processing_time_ms: 0.0 } - gte: { jobs.0.timing_stats.minimum_bucket_processing_time_ms: 0.0 } - gte: { jobs.0.timing_stats.maximum_bucket_processing_time_ms: 0.0 } - gte: { jobs.0.timing_stats.average_bucket_processing_time_ms: 0.0 } @@ -158,6 +160,7 @@ setup: - is_true: jobs.0.open_time - match: { jobs.0.timing_stats.job_id: jobs-get-stats-datafeed-job } - match: { jobs.0.timing_stats.bucket_count: 0 } + - match: { jobs.0.timing_stats.total_bucket_processing_time_ms: 0.0 } - is_false: 
jobs.0.timing_stats.minimum_bucket_processing_time_ms - is_false: jobs.0.timing_stats.maximum_bucket_processing_time_ms - is_false: jobs.0.timing_stats.average_bucket_processing_time_ms @@ -342,6 +345,7 @@ setup: - is_false: jobs.0.open_time - match: { jobs.0.timing_stats.job_id: job-stats-test } - match: { jobs.0.timing_stats.bucket_count: 0 } + - match: { jobs.0.timing_stats.total_bucket_processing_time_ms: 0.0 } - is_false: jobs.0.timing_stats.minimum_bucket_processing_time_ms - is_false: jobs.0.timing_stats.maximum_bucket_processing_time_ms - is_false: jobs.0.timing_stats.average_bucket_processing_time_ms @@ -356,6 +360,7 @@ setup: - is_false: jobs.1.open_time - match: { jobs.1.timing_stats.job_id: jobs-get-stats-datafeed-job } - match: { jobs.1.timing_stats.bucket_count: 0 } + - match: { jobs.1.timing_stats.total_bucket_processing_time_ms: 0.0 } - is_false: jobs.1.timing_stats.minimum_bucket_processing_time_ms - is_false: jobs.1.timing_stats.maximum_bucket_processing_time_ms - is_false: jobs.1.timing_stats.average_bucket_processing_time_ms diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/11_builtin.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/11_builtin.yml index 66a2c801ff79f..2e23a85b7e737 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/11_builtin.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/11_builtin.yml @@ -15,5 +15,5 @@ setup: # This is fragile - it needs to be updated every time we add a new cluster/index privilege # I would much prefer we could just check that specific entries are in the array, but we don't have # an assertion for that - - length: { "cluster" : 26 } + - length: { "cluster" : 28 } - length: { "index" : 16 } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/vectors/15_dense_vector_l1l2.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/vectors/15_dense_vector_l1l2.yml new file mode 100644 index 0000000000000..5845c17f5a080 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/vectors/15_dense_vector_l1l2.yml @@ -0,0 +1,102 @@ +setup: + - skip: + features: headers + version: " - 7.3.99" + reason: "l1norm and l2norm functions were added from 7.4" + + - do: + indices.create: + include_type_name: false + index: test-index + body: + settings: + number_of_replicas: 0 + mappings: + properties: + my_dense_vector: + type: dense_vector + dims: 5 + - do: + index: + index: test-index + id: 1 + body: + my_dense_vector: [230.0, 300.33, -34.8988, 15.555, -200.0] + + - do: + index: + index: test-index + id: 2 + body: + my_dense_vector: [-0.5, 100.0, -13, 14.8, -156.0] + + - do: + index: + index: test-index + id: 3 + body: + my_dense_vector: [0.5, 111.3, -13.0, 14.8, -156.0] + + - do: + indices.refresh: {} + + +--- +"L1 norm": + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + body: + query: + script_score: + query: {match_all: {} } + script: + source: "l1norm(params.query_vector, doc['my_dense_vector'])" + params: + query_vector: [0.5, 111.3, -13.0, 14.8, -156.0] + + - match: {hits.total: 3} + + - match: {hits.hits.0._id: "1"} + - gte: {hits.hits.0._score: 485.18} + - lte: {hits.hits.0._score: 485.19} + + - match: {hits.hits.1._id: "2"} + - gte: {hits.hits.1._score: 12.29} + - lte: {hits.hits.1._score: 12.30} + + - match: {hits.hits.2._id: "3"} + - gte: {hits.hits.2._score: 0.00} + - lte: {hits.hits.2._score: 0.01} + +--- +"L2 norm": + - do: + headers: + Content-Type: application/json + 
search: + rest_total_hits_as_int: true + body: + query: + script_score: + query: {match_all: {} } + script: + source: "l2norm(params.query_vector, doc['my_dense_vector'])" + params: + query_vector: [0.5, 111.3, -13.0, 14.8, -156.0] + + - match: {hits.total: 3} + + - match: {hits.hits.0._id: "1"} + - gte: {hits.hits.0._score: 301.36} + - lte: {hits.hits.0._score: 301.37} + + - match: {hits.hits.1._id: "2"} + - gte: {hits.hits.1._score: 11.34} + - lte: {hits.hits.1._score: 11.35} + + - match: {hits.hits.2._id: "3"} + - gte: {hits.hits.2._score: 0.00} + - lte: {hits.hits.2._score: 0.01} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/vectors/35_sparse_vector_l1l2.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/vectors/35_sparse_vector_l1l2.yml new file mode 100644 index 0000000000000..05d210df7578a --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/vectors/35_sparse_vector_l1l2.yml @@ -0,0 +1,101 @@ +setup: + - skip: + features: headers + version: " - 7.3.99" + reason: "l1norm and l2norm functions were added from 7.4" + + - do: + indices.create: + include_type_name: false + index: test-index + body: + settings: + number_of_replicas: 0 + mappings: + properties: + my_sparse_vector: + type: sparse_vector + - do: + index: + index: test-index + id: 1 + body: + my_sparse_vector: {"2": 230.0, "10" : 300.33, "50": -34.8988, "113": 15.555, "4545": -200.0} + + - do: + index: + index: test-index + id: 2 + body: + my_sparse_vector: {"2": -0.5, "10" : 100.0, "50": -13, "113": 14.8, "4545": -156.0} + + - do: + index: + index: test-index + id: 3 + body: + my_sparse_vector: {"2": 0.5, "10" : 111.3, "50": -13.0, "113": 14.8, "4545": -156.0} + + - do: + indices.refresh: {} + +--- +"L1 norm": + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + body: + query: + script_score: + query: {match_all: {} } + script: + source: "l1normSparse(params.query_vector, doc['my_sparse_vector'])" + params: + query_vector: {"2": 0.5, "10" : 111.3, "50": -13.0, "113": 14.8, "4545": -156.0} + + - match: {hits.total: 3} + + - match: {hits.hits.0._id: "1"} + - gte: {hits.hits.0._score: 485.18} + - lte: {hits.hits.0._score: 485.19} + + - match: {hits.hits.1._id: "2"} + - gte: {hits.hits.1._score: 12.29} + - lte: {hits.hits.1._score: 12.30} + + - match: {hits.hits.2._id: "3"} + - gte: {hits.hits.2._score: 0.00} + - lte: {hits.hits.2._score: 0.01} + + +--- +"L2 norm": + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + body: + query: + script_score: + query: {match_all: {} } + script: + source: "l2normSparse(params.query_vector, doc['my_sparse_vector'])" + params: + query_vector: {"2": 0.5, "10" : 111.3, "50": -13.0, "113": 14.8, "4545": -156.0} + + - match: {hits.total: 3} + + - match: {hits.hits.0._id: "1"} + - gte: {hits.hits.0._score: 301.36} + - lte: {hits.hits.0._score: 301.37} + + - match: {hits.hits.1._id: "2"} + - gte: {hits.hits.1._score: 11.34} + - lte: {hits.hits.1._score: 11.35} + + - match: {hits.hits.2._id: "3"} + - gte: {hits.hits.2._score: 0.00} + - lte: {hits.hits.2._score: 0.01} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/vectors/40_sparse_vector_special_cases.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/vectors/40_sparse_vector_special_cases.yml index 7137afef0f40c..396d144aecee5 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/vectors/40_sparse_vector_special_cases.yml +++ 
b/x-pack/plugin/src/test/resources/rest-api-spec/test/vectors/40_sparse_vector_special_cases.yml @@ -220,3 +220,139 @@ setup: params: query_vector: [0.5, 111] - match: { error.root_cause.0.type: "script_exception" } + +--- +"Query vector has different dimensions from documents' vectors": +- do: + index: + index: test-index + id: 1 + body: + my_sparse_vector: {"1": 10} + +- do: + index: + index: test-index + id: 2 + body: + my_sparse_vector: {"1": 10, "10" : 10.5} + +- do: + index: + index: test-index + id: 3 + body: + my_sparse_vector: {"1": 10, "10" : 10.5, "100": 100.5} + +- do: + indices.refresh: {} + +- do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + body: + query: + script_score: + query: {match_all: {} } + script: + source: "dotProductSparse(params.query_vector, doc['my_sparse_vector'])" + params: + query_vector: {"1": 10, "5": 5} + +- match: {hits.total: 3} + +- match: {hits.hits.0._id: "1"} +- gte: {hits.hits.0._score: 99.99} +- lte: {hits.hits.0._score: 100.01} + +- match: {hits.hits.1._id: "2"} +- gte: {hits.hits.1._score: 99.99} +- lte: {hits.hits.1._score: 100.01} + +- match: {hits.hits.2._id: "3"} +- gte: {hits.hits.2._score: 99.99} +- lte: {hits.hits.2._score: 100.01} + + +- do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + body: + query: + script_score: + query: {match_all: {} } + script: + source: "cosineSimilaritySparse(params.query_vector, doc['my_sparse_vector'])" + params: + query_vector: {"1": 10, "5" : 5} + +- match: {hits.total: 3} + +- match: {hits.hits.0._id: "1"} +- gte: {hits.hits.0._score: 0.894} +- lte: {hits.hits.0._score: 0.895} + +- match: {hits.hits.1._id: "2"} +- gte: {hits.hits.1._score: 0.61} +- lte: {hits.hits.1._score: 0.62} + +- match: {hits.hits.2._id: "3"} +- gte: {hits.hits.2._score: 0.08} +- lte: {hits.hits.2._score: 0.09} + +- do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + body: + query: + script_score: + query: {match_all: {} } + script: + source: "l1normSparse(params.query_vector, doc['my_sparse_vector'])" + params: + query_vector: {"1": 10, "5": 5} + +- match: {hits.total: 3} + +- match: {hits.hits.0._id: "3"} +- match: {hits.hits.0._score: 116} + +- match: {hits.hits.1._id: "2"} +- match: {hits.hits.1._score: 15.5} + +- match: {hits.hits.2._id: "1"} +- match: {hits.hits.2._score: 5} + +- do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + body: + query: + script_score: + query: {match_all: {} } + script: + source: "l2normSparse(params.query_vector, doc['my_sparse_vector'])" + params: + query_vector: {"1": 10, "5": 5} + +- match: {hits.total: 3} + +- match: {hits.hits.0._id: "3"} +- gte: {hits.hits.0._score: 101.17} +- lte: {hits.hits.0._score: 101.18} + +- match: {hits.hits.1._id: "2"} +- gte: {hits.hits.1._score: 11.62} +- lte: {hits.hits.1._score: 11.63} + +- match: {hits.hits.2._id: "1"} +- gte: {hits.hits.2._score: 5.0} +- lte: {hits.hits.2._score: 5.0} diff --git a/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/ScoreScriptUtils.java b/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/ScoreScriptUtils.java index 34a1ae2c12a3c..10631aba4ce2d 100644 --- a/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/ScoreScriptUtils.java +++ b/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/ScoreScriptUtils.java @@ -20,6 +20,52 @@ public class ScoreScriptUtils { //**************FUNCTIONS 
FOR DENSE VECTORS + /** + * Calculate l1 norm - Manhattan distance + * between a query's dense vector and documents' dense vectors + * + * @param queryVector the query vector parsed as {@code List<Number>} from json + * @param dvs VectorScriptDocValues representing encoded documents' vectors + */ + public static double l1norm(List<Number> queryVector, VectorScriptDocValues.DenseVectorScriptDocValues dvs){ + BytesRef value = dvs.getEncodedValue(); + float[] docVector = VectorEncoderDecoder.decodeDenseVector(value); + if (queryVector.size() != docVector.length) { + throw new IllegalArgumentException("Can't calculate l1norm! The number of dimensions of the query vector [" + + queryVector.size() + "] is different from the documents' vectors [" + docVector.length + "]."); + } + Iterator<Number> queryVectorIter = queryVector.iterator(); + double l1norm = 0; + for (int dim = 0; dim < docVector.length; dim++){ + l1norm += Math.abs(queryVectorIter.next().doubleValue() - docVector[dim]); + } + return l1norm; + } + + /** + * Calculate l2 norm - Euclidean distance + * between a query's dense vector and documents' dense vectors + * + * @param queryVector the query vector parsed as {@code List<Number>} from json + * @param dvs VectorScriptDocValues representing encoded documents' vectors + */ + public static double l2norm(List<Number> queryVector, VectorScriptDocValues.DenseVectorScriptDocValues dvs){ + BytesRef value = dvs.getEncodedValue(); + float[] docVector = VectorEncoderDecoder.decodeDenseVector(value); + if (queryVector.size() != docVector.length) { + throw new IllegalArgumentException("Can't calculate l2norm! The number of dimensions of the query vector [" + + queryVector.size() + "] is different from the documents' vectors [" + docVector.length + "]."); + } + Iterator<Number> queryVectorIter = queryVector.iterator(); + double l2norm = 0; + for (int dim = 0; dim < docVector.length; dim++){ + double diff = queryVectorIter.next().doubleValue() - docVector[dim]; + l2norm += diff * diff; + } + return Math.sqrt(l2norm); + } + + /** * Calculate a dot product between a query's dense vector and documents' dense vectors * @@ -92,25 +138,17 @@ private static double intDotProduct(List<Number> v1, float[] v2){ //**************FUNCTIONS FOR SPARSE VECTORS - /** - * Calculate a dot product between a query's sparse vector and documents' sparse vectors - * - * DotProductSparse is implemented as a class to use - * painless script caching to prepare queryVector - * only once per script execution for all documents. 
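The l1norm and l2norm functions added above compute the Manhattan and Euclidean distance between the query vector and each document's vector. As a sanity check of that arithmetic, a minimal standalone sketch (illustrative only, not part of this change; the class name NormCheck is hypothetical) reproduces the expected scores from the dense_vector YAML tests using the same query and document vectors:

public class NormCheck {
    public static void main(String[] args) {
        // query and document vectors taken from 15_dense_vector_l1l2.yml above
        double[] query = {0.5, 111.3, -13.0, 14.8, -156.0};
        double[] doc = {230.0, 300.33, -34.8988, 15.555, -200.0};
        double l1 = 0;
        double l2 = 0;
        for (int i = 0; i < query.length; i++) {
            double diff = query[i] - doc[i];
            l1 += Math.abs(diff); // Manhattan: sum of absolute differences
            l2 += diff * diff;    // Euclidean: sum of squared differences
        }
        System.out.println(l1);            // ~485.184, the expected l1norm _score for doc 1
        System.out.println(Math.sqrt(l2)); // ~301.361, the expected l2norm _score for doc 1
    }
}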
- * A user will call `dotProductSparse(params.queryVector, doc['my_vector'])` - */ - public static final class DotProductSparse { + public static class VectorSparseFunctions { final double[] queryValues; final int[] queryDims; // prepare queryVector once per script execution // queryVector represents a map of dimensions to values - public DotProductSparse(Map<String, Number> queryVector) { + public VectorSparseFunctions(Map<String, Number> queryVector) { //break vector into two arrays dims and values int n = queryVector.size(); - queryDims = new int[n]; queryValues = new double[n]; + queryDims = new int[n]; int i = 0; for (Map.Entry<String, Number> dimValue : queryVector.entrySet()) { try { @@ -124,6 +162,115 @@ public DotProductSparse(Map<String, Number> queryVector) { // Sort dimensions in the ascending order and sort values in the same order as their corresponding dimensions sortSparseDimsDoubleValues(queryDims, queryValues, n); } + } + + /** + * Calculate l1 norm - Manhattan distance + * between a query's sparse vector and documents' sparse vectors + * + * L1NormSparse is implemented as a class to use + * painless script caching to prepare queryVector + * only once per script execution for all documents. + * A user will call `l1normSparse(params.queryVector, doc['my_vector'])` + */ + public static final class L1NormSparse extends VectorSparseFunctions { + public L1NormSparse(Map<String, Number> queryVector) { + super(queryVector); + } + + public double l1normSparse(VectorScriptDocValues.SparseVectorScriptDocValues dvs) { + BytesRef value = dvs.getEncodedValue(); + int[] docDims = VectorEncoderDecoder.decodeSparseVectorDims(value); + float[] docValues = VectorEncoderDecoder.decodeSparseVector(value); + int queryIndex = 0; + int docIndex = 0; + double l1norm = 0; + while (queryIndex < queryDims.length && docIndex < docDims.length) { + if (queryDims[queryIndex] == docDims[docIndex]) { + l1norm += Math.abs(queryValues[queryIndex] - docValues[docIndex]); + queryIndex++; + docIndex++; + } else if (queryDims[queryIndex] > docDims[docIndex]) { + l1norm += Math.abs(docValues[docIndex]); // 0 for missing query dim + docIndex++; + } else { + l1norm += Math.abs(queryValues[queryIndex]); // 0 for missing doc dim + queryIndex++; + } + } + while (queryIndex < queryDims.length) { + l1norm += Math.abs(queryValues[queryIndex]); // 0 for missing doc dim + queryIndex++; + } + while (docIndex < docDims.length) { + l1norm += Math.abs(docValues[docIndex]); // 0 for missing query dim + docIndex++; + } + return l1norm; + } + } + + /** + * Calculate l2 norm - Euclidean distance + * between a query's sparse vector and documents' sparse vectors + * + * L2NormSparse is implemented as a class to use + * painless script caching to prepare queryVector + * only once per script execution for all documents. 
+ * A user will call `l2normSparse(params.queryVector, doc['my_vector'])` */ + public static final class L2NormSparse extends VectorSparseFunctions { + public L2NormSparse(Map<String, Number> queryVector) { + super(queryVector); + } + + public double l2normSparse(VectorScriptDocValues.SparseVectorScriptDocValues dvs) { + BytesRef value = dvs.getEncodedValue(); + int[] docDims = VectorEncoderDecoder.decodeSparseVectorDims(value); + float[] docValues = VectorEncoderDecoder.decodeSparseVector(value); + int queryIndex = 0; + int docIndex = 0; + double l2norm = 0; + while (queryIndex < queryDims.length && docIndex < docDims.length) { + if (queryDims[queryIndex] == docDims[docIndex]) { + double diff = queryValues[queryIndex] - docValues[docIndex]; + l2norm += diff * diff; + queryIndex++; + docIndex++; + } else if (queryDims[queryIndex] > docDims[docIndex]) { + double diff = docValues[docIndex]; // 0 for missing query dim + l2norm += diff * diff; + docIndex++; + } else { + double diff = queryValues[queryIndex]; // 0 for missing doc dim + l2norm += diff * diff; + queryIndex++; + } + } + while (queryIndex < queryDims.length) { + l2norm += queryValues[queryIndex] * queryValues[queryIndex]; // 0 for missing doc dims + queryIndex++; + } + while (docIndex < docDims.length) { + l2norm += docValues[docIndex] * docValues[docIndex]; // 0 for missing query dims + docIndex++; + } + return Math.sqrt(l2norm); + } + } + + /** + * Calculate a dot product between a query's sparse vector and documents' sparse vectors + * + * DotProductSparse is implemented as a class to use + * painless script caching to prepare queryVector + * only once per script execution for all documents. + * A user will call `dotProductSparse(params.queryVector, doc['my_vector'])` + */ + public static final class DotProductSparse extends VectorSparseFunctions { + public DotProductSparse(Map<String, Number> queryVector) { + super(queryVector); + } public double dotProductSparse(VectorScriptDocValues.SparseVectorScriptDocValues dvs) { BytesRef value = dvs.getEncodedValue(); @@ -141,32 +288,16 @@ public double dotProductSparse(VectorScriptDocValues.SparseVectorScriptDocValues * only once per script execution for all documents. 
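All of the sparse-vector classes above share the same traversal: the query's dimensions are sorted once in the VectorSparseFunctions constructor, and each document's sorted dimensions are then merged against them with two cursors, a dimension absent on either side contributing a value of zero. A condensed sketch of that merge for the L1 case (method and parameter names here are illustrative, not from this change):

static double l1Merge(int[] queryDims, double[] queryValues, int[] docDims, float[] docValues) {
    int q = 0, d = 0;
    double sum = 0;
    // both dimension arrays are sorted ascending, so each dimension is visited exactly once
    while (q < queryDims.length && d < docDims.length) {
        if (queryDims[q] == docDims[d]) {
            sum += Math.abs(queryValues[q++] - docValues[d++]);
        } else if (queryDims[q] > docDims[d]) {
            sum += Math.abs(docValues[d++]);   // dimension present only in the document vector
        } else {
            sum += Math.abs(queryValues[q++]); // dimension present only in the query vector
        }
    }
    while (q < queryDims.length) sum += Math.abs(queryValues[q++]); // leftover query dims
    while (d < docDims.length) sum += Math.abs(docValues[d++]);     // leftover document dims
    return sum;
}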
* A user will call `cosineSimilaritySparse(params.queryVector, doc['my_vector'])` */ - public static final class CosineSimilaritySparse { - final double[] queryValues; - final int[] queryDims; + public static final class CosineSimilaritySparse extends VectorSparseFunctions { final double queryVectorMagnitude; - // prepare queryVector once per script execution public CosineSimilaritySparse(Map<String, Number> queryVector) { - //break vector into two arrays dims and values - int n = queryVector.size(); - queryValues = new double[n]; - queryDims = new int[n]; + super(queryVector); double dotProduct = 0; - int i = 0; - for (Map.Entry<String, Number> dimValue : queryVector.entrySet()) { - try { - queryDims[i] = Integer.parseInt(dimValue.getKey()); - } catch (final NumberFormatException e) { - throw new IllegalArgumentException("Failed to parse a query vector dimension, it must be an integer!", e); - } - queryValues[i] = dimValue.getValue().doubleValue(); + for (int i = 0; i < queryDims.length; i++) { dotProduct += queryValues[i] * queryValues[i]; - i++; } this.queryVectorMagnitude = Math.sqrt(dotProduct); - // Sort dimensions in the ascending order and sort values in the same order as their corresponding dimensions - sortSparseDimsDoubleValues(queryDims, queryValues, n); } public double cosineSimilaritySparse(VectorScriptDocValues.SparseVectorScriptDocValues dvs) { diff --git a/x-pack/plugin/vectors/src/main/resources/org/elasticsearch/xpack/vectors/query/whitelist.txt b/x-pack/plugin/vectors/src/main/resources/org/elasticsearch/xpack/vectors/query/whitelist.txt index d385744e88fd5..252d4356f9ca1 100644 --- a/x-pack/plugin/vectors/src/main/resources/org/elasticsearch/xpack/vectors/query/whitelist.txt +++ b/x-pack/plugin/vectors/src/main/resources/org/elasticsearch/xpack/vectors/query/whitelist.txt @@ -11,8 +11,12 @@ class org.elasticsearch.xpack.vectors.query.VectorScriptDocValues$SparseVectorSc } static_import { + double l1norm(List, VectorScriptDocValues.DenseVectorScriptDocValues) from_class org.elasticsearch.xpack.vectors.query.ScoreScriptUtils + double l2norm(List, VectorScriptDocValues.DenseVectorScriptDocValues) from_class org.elasticsearch.xpack.vectors.query.ScoreScriptUtils double cosineSimilarity(List, VectorScriptDocValues.DenseVectorScriptDocValues) bound_to org.elasticsearch.xpack.vectors.query.ScoreScriptUtils$CosineSimilarity double dotProduct(List, VectorScriptDocValues.DenseVectorScriptDocValues) from_class org.elasticsearch.xpack.vectors.query.ScoreScriptUtils + double l1normSparse(Map, VectorScriptDocValues.SparseVectorScriptDocValues) bound_to org.elasticsearch.xpack.vectors.query.ScoreScriptUtils$L1NormSparse + double l2normSparse(Map, VectorScriptDocValues.SparseVectorScriptDocValues) bound_to org.elasticsearch.xpack.vectors.query.ScoreScriptUtils$L2NormSparse double dotProductSparse(Map, VectorScriptDocValues.SparseVectorScriptDocValues) bound_to org.elasticsearch.xpack.vectors.query.ScoreScriptUtils$DotProductSparse double cosineSimilaritySparse(Map, VectorScriptDocValues.SparseVectorScriptDocValues) bound_to org.elasticsearch.xpack.vectors.query.ScoreScriptUtils$CosineSimilaritySparse } \ No newline at end of file diff --git a/x-pack/plugin/vectors/src/test/java/org/elasticsearch/xpack/vectors/query/ScoreScriptUtilsTests.java b/x-pack/plugin/vectors/src/test/java/org/elasticsearch/xpack/vectors/query/ScoreScriptUtilsTests.java index 699a9b09fb537..f9bb87ece0ca8 100644 --- a/x-pack/plugin/vectors/src/test/java/org/elasticsearch/xpack/vectors/query/ScoreScriptUtilsTests.java +++ 
b/x-pack/plugin/vectors/src/test/java/org/elasticsearch/xpack/vectors/query/ScoreScriptUtilsTests.java @@ -12,6 +12,8 @@ import org.elasticsearch.xpack.vectors.query.ScoreScriptUtils.CosineSimilarity; import org.elasticsearch.xpack.vectors.query.ScoreScriptUtils.DotProductSparse; import org.elasticsearch.xpack.vectors.query.ScoreScriptUtils.CosineSimilaritySparse; +import org.elasticsearch.xpack.vectors.query.ScoreScriptUtils.L1NormSparse; +import org.elasticsearch.xpack.vectors.query.ScoreScriptUtils.L2NormSparse; import java.util.Arrays; import java.util.HashMap; @@ -20,6 +22,9 @@ import static org.elasticsearch.xpack.vectors.mapper.VectorEncoderDecoderTests.mockEncodeDenseVector; import static org.elasticsearch.xpack.vectors.query.ScoreScriptUtils.dotProduct; +import static org.elasticsearch.xpack.vectors.query.ScoreScriptUtils.l1norm; +import static org.elasticsearch.xpack.vectors.query.ScoreScriptUtils.l2norm; + import static org.hamcrest.Matchers.containsString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -35,12 +40,20 @@ public void testDenseVectorFunctions() { // test dotProduct double result = dotProduct(queryVector, dvs); - assertEquals("dotProduct result is not equal to the expected value!", 65425.62, result, 0.1); + assertEquals("dotProduct result is not equal to the expected value!", 65425.626, result, 0.001); // test cosineSimilarity CosineSimilarity cosineSimilarity = new CosineSimilarity(queryVector); double result2 = cosineSimilarity.cosineSimilarity(dvs); - assertEquals("cosineSimilarity result is not equal to the expected value!", 0.78, result2, 0.1); + assertEquals("cosineSimilarity result is not equal to the expected value!", 0.790, result2, 0.001); + + // test l1Norm + double result3 = l1norm(queryVector, dvs); + assertEquals("l1norm result is not equal to the expected value!", 485.184, result3, 0.001); + + // test l2norm + double result4 = l2norm(queryVector, dvs); + assertEquals("l2norm result is not equal to the expected value!", 301.361, result4, 0.001); // test dotProduct fails when queryVector has wrong number of dims List invalidQueryVector = Arrays.asList(0.5, 111.3); @@ -52,6 +65,13 @@ public void testDenseVectorFunctions() { e = expectThrows(IllegalArgumentException.class, () -> cosineSimilarity2.cosineSimilarity(dvs)); assertThat(e.getMessage(), containsString("dimensions of the query vector [2] is different from the documents' vectors [5]")); + // test l1norm fails when queryVector has wrong number of dims + e = expectThrows(IllegalArgumentException.class, () -> l1norm(invalidQueryVector, dvs)); + assertThat(e.getMessage(), containsString("dimensions of the query vector [2] is different from the documents' vectors [5]")); + + // test l2norm fails when queryVector has wrong number of dims + e = expectThrows(IllegalArgumentException.class, () -> l2norm(invalidQueryVector, dvs)); + assertThat(e.getMessage(), containsString("dimensions of the query vector [2] is different from the documents' vectors [5]")); } public void testSparseVectorFunctions() { @@ -71,11 +91,95 @@ public void testSparseVectorFunctions() { // test dotProduct DotProductSparse docProductSparse = new DotProductSparse(queryVector); double result = docProductSparse.dotProductSparse(dvs); - assertEquals("dotProductSparse result is not equal to the expected value!", 65425.62, result, 0.1); + assertEquals("dotProductSparse result is not equal to the expected value!", 65425.626, result, 0.001); + + // test cosineSimilarity + CosineSimilaritySparse 
cosineSimilaritySparse = new CosineSimilaritySparse(queryVector); + double result2 = cosineSimilaritySparse.cosineSimilaritySparse(dvs); + assertEquals("cosineSimilaritySparse result is not equal to the expected value!", 0.790, result2, 0.001); + + // test l1norm + L1NormSparse l1Norm = new L1NormSparse(queryVector); + double result3 = l1Norm.l1normSparse(dvs); + assertEquals("l1normSparse result is not equal to the expected value!", 485.184, result3, 0.001); + + // test l2norm + L2NormSparse l2Norm = new L2NormSparse(queryVector); + double result4 = l2Norm.l2normSparse(dvs); + assertEquals("l2normSparse result is not equal to the expected value!", 301.361, result4, 0.001); + } + + public void testSparseVectorMissingDimensions1() { + // Document vector's biggest dimension > query vector's biggest dimension + int[] docVectorDims = {2, 10, 50, 113, 4545, 4546}; + float[] docVectorValues = {230.0f, 300.33f, -34.8988f, 15.555f, -200.0f, 11.5f}; + BytesRef encodedDocVector = VectorEncoderDecoder.encodeSparseVector(docVectorDims, docVectorValues, docVectorDims.length); + VectorScriptDocValues.SparseVectorScriptDocValues dvs = mock(VectorScriptDocValues.SparseVectorScriptDocValues.class); + when(dvs.getEncodedValue()).thenReturn(encodedDocVector); + Map<String, Number> queryVector = new HashMap<String, Number>() {{ + put("2", 0.5); + put("10", 111.3); + put("50", -13.0); + put("113", 14.8); + put("114", -20.5); + put("4545", -156.0); + }}; + + // test dotProduct + DotProductSparse docProductSparse = new DotProductSparse(queryVector); + double result = docProductSparse.dotProductSparse(dvs); + assertEquals("dotProductSparse result is not equal to the expected value!", 65425.626, result, 0.001); + + // test cosineSimilarity + CosineSimilaritySparse cosineSimilaritySparse = new CosineSimilaritySparse(queryVector); + double result2 = cosineSimilaritySparse.cosineSimilaritySparse(dvs); + assertEquals("cosineSimilaritySparse result is not equal to the expected value!", 0.786, result2, 0.001); + + // test l1norm + L1NormSparse l1Norm = new L1NormSparse(queryVector); + double result3 = l1Norm.l1normSparse(dvs); + assertEquals("l1normSparse result is not equal to the expected value!", 517.184, result3, 0.001); + + // test l2norm + L2NormSparse l2Norm = new L2NormSparse(queryVector); + double result4 = l2Norm.l2normSparse(dvs); + assertEquals("l2normSparse result is not equal to the expected value!", 302.277, result4, 0.001); + } + + public void testSparseVectorMissingDimensions2() { + // Document vector's biggest dimension < query vector's biggest dimension + int[] docVectorDims = {2, 10, 50, 113, 4545, 4546}; + float[] docVectorValues = {230.0f, 300.33f, -34.8988f, 15.555f, -200.0f, 11.5f}; + BytesRef encodedDocVector = VectorEncoderDecoder.encodeSparseVector(docVectorDims, docVectorValues, docVectorDims.length); + VectorScriptDocValues.SparseVectorScriptDocValues dvs = mock(VectorScriptDocValues.SparseVectorScriptDocValues.class); + when(dvs.getEncodedValue()).thenReturn(encodedDocVector); + Map<String, Number> queryVector = new HashMap<String, Number>() {{ + put("2", 0.5); + put("10", 111.3); + put("50", -13.0); + put("113", 14.8); + put("4545", -156.0); + put("4548", -20.5); + }}; + + // test dotProduct + DotProductSparse docProductSparse = new DotProductSparse(queryVector); + double result = docProductSparse.dotProductSparse(dvs); + assertEquals("dotProductSparse result is not equal to the expected value!", 65425.626, result, 0.001); // test cosineSimilarity CosineSimilaritySparse cosineSimilaritySparse = new CosineSimilaritySparse(queryVector); double result2 = 
cosineSimilaritySparse.cosineSimilaritySparse(dvs); - assertEquals("cosineSimilaritySparse result is not equal to the expected value!", 0.78, result2, 0.1); + assertEquals("cosineSimilaritySparse result is not equal to the expected value!", 0.786, result2, 0.001); + + // test l1norm + L1NormSparse l1Norm = new L1NormSparse(queryVector); + double result3 = l1Norm.l1normSparse(dvs); + assertEquals("l1normSparse result is not equal to the expected value!", 517.184, result3, 0.001); + + // test l2norm + L2NormSparse l2Norm = new L2NormSparse(queryVector); + double result4 = l2Norm.l2normSparse(dvs); + assertEquals("l2normSparse result is not equal to the expected value!", 302.277, result4, 0.001); } } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java index 1b8fcc2658f79..73d491de725d0 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java @@ -330,14 +330,23 @@ public void beforeBulk(long executionId, BulkRequest request) { @Override public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { if (response.hasFailures()) { - Map<String, String> triggeredWatches = Arrays.stream(response.getItems()) + Map<String, String> triggeredFailures = Arrays.stream(response.getItems()) .filter(BulkItemResponse::isFailed) .filter(r -> r.getIndex().startsWith(TriggeredWatchStoreField.INDEX_NAME)) .collect(Collectors.toMap(BulkItemResponse::getId, BulkItemResponse::getFailureMessage)); - if (triggeredWatches.isEmpty() == false) { - String failure = triggeredWatches.values().stream().collect(Collectors.joining(", ")); + Map<String, String> historyFailures = Arrays.stream(response.getItems()) + .filter(BulkItemResponse::isFailed) + .filter(r -> r.getIndex().startsWith(HistoryStoreField.INDEX_PREFIX)) + .collect(Collectors.toMap(BulkItemResponse::getId, BulkItemResponse::getFailureMessage)); + if (triggeredFailures.isEmpty() == false) { + String failure = triggeredFailures.values().stream().collect(Collectors.joining(", ")); logger.error("triggered watches could not be deleted {}, failure [{}]", - triggeredWatches.keySet(), Strings.substring(failure, 0, 2000)); + triggeredFailures.keySet(), Strings.substring(failure, 0, 2000)); + } + if (historyFailures.isEmpty() == false) { + String failure = historyFailures.values().stream().collect(Collectors.joining(", ")); + logger.error("watch history could not be written {}, failure [{}]", + historyFailures.keySet(), Strings.substring(failure, 0, 2000)); } Map<String, String> overwrittenIds = Arrays.stream(response.getItems()) diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/delete/TransportDeleteWatchAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/delete/TransportDeleteWatchAction.java index 2f97fcb4f18aa..6b3b5703bdc42 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/delete/TransportDeleteWatchAction.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/delete/TransportDeleteWatchAction.java @@ -21,8 +21,6 @@ import org.elasticsearch.xpack.core.watcher.transport.actions.delete.DeleteWatchAction; import org.elasticsearch.xpack.core.watcher.watch.Watch; -import java.util.function.Supplier; - import static org.elasticsearch.xpack.core.ClientHelper.WATCHER_ORIGIN; import 
static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; @@ -36,7 +34,7 @@ public class TransportDeleteWatchAction extends HandledTransportAction<DeleteWatchRequest, DeleteWatchResponse> { - super(DeleteWatchAction.NAME, transportService, actionFilters, (Supplier<DeleteWatchRequest>) DeleteWatchRequest::new); + super(DeleteWatchAction.NAME, transportService, actionFilters, DeleteWatchRequest::new); this.client = client; } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/service/TransportWatcherServiceAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/service/TransportWatcherServiceAction.java index a992a9a3675c3..675d04dc3d0f1 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/service/TransportWatcherServiceAction.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/service/TransportWatcherServiceAction.java @@ -51,7 +51,7 @@ public TransportWatcherServiceAction(TransportService transportService, ClusterS ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { super(WatcherServiceAction.NAME, transportService, clusterService, threadPool, actionFilters, - indexNameExpressionResolver, WatcherServiceRequest::new); + WatcherServiceRequest::new, indexNameExpressionResolver); } @Override @@ -64,11 +64,6 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { return new AcknowledgedResponse(in); } - @Override - protected AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected void masterOperation(Task task, WatcherServiceRequest request, ClusterState state, ActionListener<AcknowledgedResponse> listener) { diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/stats/TransportWatcherStatsAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/stats/TransportWatcherStatsAction.java index 39de29f5eaf40..c1b0a9cd0093f 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/stats/TransportWatcherStatsAction.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/stats/TransportWatcherStatsAction.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.support.nodes.TransportNodesAction; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -22,6 +23,7 @@ import org.elasticsearch.xpack.watcher.execution.ExecutionService; import org.elasticsearch.xpack.watcher.trigger.TriggerService; +import java.io.IOException; import java.util.Arrays; import java.util.List; @@ -58,8 +60,8 @@ protected WatcherStatsRequest.Node newNodeRequest(WatcherStatsRequest request) { } @Override - protected WatcherStatsResponse.Node newNodeResponse() { - return new WatcherStatsResponse.Node(); + protected WatcherStatsResponse.Node newNodeResponse(StreamInput in) throws IOException { + return new WatcherStatsResponse.Node(in); } @Override diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookHttpsIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookHttpsIntegrationTests.java index 
e5cf3a54a4c66..c03d924cd6faa 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookHttpsIntegrationTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookHttpsIntegrationTests.java @@ -150,7 +150,7 @@ private static List<String> getProtocols() { } else { JavaVersion full = AccessController.doPrivileged( - (PrivilegedAction<JavaVersion>) () -> JavaVersion.parse(System.getProperty("java.specification.version"))); + (PrivilegedAction<JavaVersion>) () -> JavaVersion.parse(System.getProperty("java.version"))); if (full.compareTo(JavaVersion.parse("12.0.1")) < 0) { return List.of("TLSv1.2"); } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpClientTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpClientTests.java index d24bba0b1ba57..cc1fdec18bafe 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpClientTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpClientTests.java @@ -759,7 +759,7 @@ private static List<String> getProtocols() { } else { JavaVersion full = AccessController.doPrivileged( - (PrivilegedAction<JavaVersion>) () -> JavaVersion.parse(System.getProperty("java.specification.version"))); + (PrivilegedAction<JavaVersion>) () -> JavaVersion.parse(System.getProperty("java.version"))); if (full.compareTo(JavaVersion.parse("12.0.1")) < 0) { return List.of("TLSv1.2"); } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java index 40478f45dfa8d..53f060b22c3e1 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java @@ -1098,7 +1098,7 @@ public void testUpdateWatchStatusDoesNotUpdateState() throws Exception { } PlainActionFuture<UpdateResponse> future = PlainActionFuture.newFuture(); - future.onResponse(new UpdateResponse()); + future.onResponse(new UpdateResponse(null, null, null, null, 0, 0, 0, null)); return future; }).when(client).update(any()); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java index 7ad5b5e98bf4a..ad288632cdc03 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java @@ -53,7 +53,7 @@ import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsResponse; import org.elasticsearch.xpack.core.watcher.watch.ClockMock; import org.elasticsearch.xpack.core.watcher.watch.Watch; -import org.elasticsearch.xpack.indexlifecycle.IndexLifecycle; +import org.elasticsearch.xpack.ilm.IndexLifecycle; import org.elasticsearch.xpack.watcher.ClockHolder; import org.elasticsearch.xpack.watcher.notification.email.Authentication; import org.elasticsearch.xpack.watcher.notification.email.Email; diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/WatchRequestValidationTests.java 
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java index 40478f45dfa8d..53f060b22c3e1 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java @@ -1098,7 +1098,7 @@ public void testUpdateWatchStatusDoesNotUpdateState() throws Exception { } PlainActionFuture<UpdateResponse> future = PlainActionFuture.newFuture(); - future.onResponse(new UpdateResponse()); + future.onResponse(new UpdateResponse(null, null, null, null, 0, 0, 0, null)); return future; }).when(client).update(any()); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java index 7ad5b5e98bf4a..ad288632cdc03 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java @@ -53,7 +53,7 @@ import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsResponse; import org.elasticsearch.xpack.core.watcher.watch.ClockMock; import org.elasticsearch.xpack.core.watcher.watch.Watch; -import org.elasticsearch.xpack.indexlifecycle.IndexLifecycle; +import org.elasticsearch.xpack.ilm.IndexLifecycle; import org.elasticsearch.xpack.watcher.ClockHolder; import org.elasticsearch.xpack.watcher.notification.email.Authentication; import org.elasticsearch.xpack.watcher.notification.email.Email; diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/WatchRequestValidationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/WatchRequestValidationTests.java index 62985cad1f144..c29de953adbb8 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/WatchRequestValidationTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/WatchRequestValidationTests.java @@ -63,7 +63,7 @@ public void testDeleteWatchInvalidWatchId() { } public void testDeleteWatchNullId() { - ActionRequestValidationException e = new DeleteWatchRequest(null).validate(); + ActionRequestValidationException e = new DeleteWatchRequest().validate(); assertThat(e, is(notNullValue())); assertThat(e.validationErrors(), hasItem("watch id is missing")); } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/activate/ActivateWatchTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/activate/ActivateWatchTests.java index eef257ce46b25..e58c5edcb1948 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/activate/ActivateWatchTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/activate/ActivateWatchTests.java @@ -3,8 +3,8 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.watcher.transport.action.activate; +package org.elasticsearch.xpack.watcher.transport.action.activate; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexResponse; @@ -16,7 +16,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.xpack.core.watcher.execution.ExecutionState; import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; import org.elasticsearch.xpack.core.watcher.transport.actions.activate.ActivateWatchRequestBuilder; @@ -43,8 +42,6 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; -@TestLogging("org.elasticsearch.xpack.watcher:DEBUG,org.elasticsearch.xpack.core.watcher:DEBUG," + - "org.elasticsearch.xpack.watcher.WatcherIndexingListener:TRACE") public class ActivateWatchTests extends AbstractWatcherIntegrationTestCase { @Override @@ -163,4 +160,5 @@ public void testLoadWatchWithoutAState() throws Exception { assertThat(getWatchResponse.getStatus().state(), notNullValue()); assertThat(getWatchResponse.getStatus().state().isActive(), is(true)); } + } diff --git a/x-pack/qa/core-rest-tests-with-security/build.gradle b/x-pack/qa/core-rest-tests-with-security/build.gradle index c9c444ee9f115..19bb678abd497 100644 --- a/x-pack/qa/core-rest-tests-with-security/build.gradle +++ b/x-pack/qa/core-rest-tests-with-security/build.gradle @@ -21,7 +21,7 @@ integTest { } testClusters.integTest { - distribution = "DEFAULT" + testDistribution = 'DEFAULT' setting 'xpack.ilm.enabled', 'false' setting 'xpack.security.enabled', 'true' setting 'xpack.watcher.enabled', 'false'
diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java new file mode 100644 index 0000000000000..ced66761d0265 --- /dev/null +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java @@ -0,0 +1,135 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.restart; + +import org.elasticsearch.Version; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; +import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; +import org.elasticsearch.xpack.core.ml.job.config.DataDescription; +import org.elasticsearch.xpack.core.ml.job.config.Detector; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.test.rest.XPackRestTestConstants; +import org.elasticsearch.xpack.test.rest.XPackRestTestHelper; +import org.junit.Before; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +public class MlConfigIndexMappingsFullClusterRestartIT extends AbstractFullClusterRestartTestCase { + + private static final String OLD_CLUSTER_JOB_ID = "ml-config-mappings-old-cluster-job"; + private static final String NEW_CLUSTER_JOB_ID = "ml-config-mappings-new-cluster-job"; + + private static final Map<String, Object> EXPECTED_DATA_FRAME_ANALYSIS_MAPPINGS = + mapOf( + "properties", mapOf( + "outlier_detection", mapOf( + "properties", mapOf( + "method", mapOf("type", "keyword"), + "n_neighbors", mapOf("type", "integer"), + "feature_influence_threshold", mapOf("type", "double"))))); + + @Override + protected Settings restClientSettings() { + String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } + + @Before + public void waitForMlTemplates() throws Exception { + List<String> templatesToWaitFor = XPackRestTestConstants.ML_POST_V660_TEMPLATES; + XPackRestTestHelper.waitForTemplates(client(), templatesToWaitFor); + } + + public void testMlConfigIndexMappingsAfterMigration() throws Exception { + if (isRunningAgainstOldCluster()) { + assertThatMlConfigIndexDoesNotExist(); + // trigger .ml-config index creation + createAnomalyDetectorJob(OLD_CLUSTER_JOB_ID); + if (getOldClusterVersion().onOrAfter(Version.V_7_3_0)) { + // .ml-config has correct mappings from the start + assertThat(mappingsForDataFrameAnalysis(), is(equalTo(EXPECTED_DATA_FRAME_ANALYSIS_MAPPINGS))); + } else { + // .ml-config does not yet have correct mappings, it will need an update after cluster is upgraded + assertThat(mappingsForDataFrameAnalysis(), is(nullValue())); + } + } else { + // trigger .ml-config index mappings update + createAnomalyDetectorJob(NEW_CLUSTER_JOB_ID); + // assert that the mappings are updated + assertThat(mappingsForDataFrameAnalysis(), is(equalTo(EXPECTED_DATA_FRAME_ANALYSIS_MAPPINGS))); + } + } + + private void assertThatMlConfigIndexDoesNotExist() { + Request getIndexRequest = new Request("GET", ".ml-config"); + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(getIndexRequest)); + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(404)); + } + + private void createAnomalyDetectorJob(String jobId) throws IOException { + Detector.Builder detector = new Detector.Builder("metric", "responsetime") + .setByFieldName("airline"); + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(Collections.singletonList(detector.build())) + .setBucketSpan(TimeValue.timeValueMinutes(10)); + Job.Builder job = new Job.Builder(jobId) + .setAnalysisConfig(analysisConfig) + .setDataDescription(new DataDescription.Builder()); + + Request putJobRequest = new Request("PUT", "/_ml/anomaly_detectors/" + jobId); + putJobRequest.setJsonEntity(Strings.toString(job)); + Response putJobResponse = client().performRequest(putJobRequest); + assertThat(putJobResponse.getStatusLine().getStatusCode(), equalTo(200)); + } + + @SuppressWarnings("unchecked") + private Map<String, Object> mappingsForDataFrameAnalysis() throws Exception { + Request getIndexMappingsRequest = new Request("GET", ".ml-config/_mappings"); + Response getIndexMappingsResponse = client().performRequest(getIndexMappingsRequest); + assertThat(getIndexMappingsResponse.getStatusLine().getStatusCode(), equalTo(200)); + + Map<String, Object> mappings = entityAsMap(getIndexMappingsResponse); + mappings = (Map<String, Object>) XContentMapValues.extractValue(mappings, ".ml-config", "mappings"); + if (mappings.containsKey("doc")) { + mappings = (Map<String, Object>) XContentMapValues.extractValue(mappings, "doc"); + } + mappings = (Map<String, Object>) XContentMapValues.extractValue(mappings, "properties", "analysis"); + return mappings; + } + + private static <K, V> Map<K, V> mapOf(K k1, V v1) { + Map<K, V> map = new HashMap<>(); + map.put(k1, v1); + return map; + } + + private static <K, V> Map<K, V> mapOf(K k1, V v1, K k2, V v2, K k3, V v3) { + Map<K, V> map = new HashMap<>(); + map.put(k1, v1); + map.put(k2, v2); + map.put(k3, v3); + return map; + } +}
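Two details of the new test are worth calling out. The hand-rolled mapOf helpers at the bottom stand in for java.util.Map.of where it cannot be assumed, and mappingsForDataFrameAnalysis() is just a typed nested-map descent over the GET .ml-config/_mappings response, with the containsKey("doc") branch absorbing the difference between responses that still nest mappings under a type name and typeless ones. A sketch of that descent with an illustrative helper (not Elasticsearch's XContentMapValues):

    import java.util.Map;

    public final class MappingPaths {
        // Descends source.get(path[0]).get(path[1])... and returns null as soon
        // as a key is missing or a value is not itself a map.
        @SuppressWarnings("unchecked")
        static Object extract(Map<String, Object> source, String... path) {
            Object current = source;
            for (String key : path) {
                if ((current instanceof Map) == false) {
                    return null;
                }
                current = ((Map<String, Object>) current).get(key);
            }
            return current;
        }
    }

In these terms the test effectively evaluates extract(response, ".ml-config", "mappings", "properties", "analysis"), optionally hopping through "doc" on the way, and the null result on a pre-7.3 old cluster is exactly what the nullValue() assertion expects.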
diff --git a/x-pack/qa/kerberos-tests/build.gradle b/x-pack/qa/kerberos-tests/build.gradle index 3095bb6364ffa..de72a9c80cce6 100644 --- a/x-pack/qa/kerberos-tests/build.gradle +++ b/x-pack/qa/kerberos-tests/build.gradle @@ -19,7 +19,7 @@ dependencies { } testClusters.integTest { - distribution = 'DEFAULT' + testDistribution = 'DEFAULT' // force localhost IPv4 otherwise it is a chicken and egg problem where we need the keytab for the hostname when starting the cluster // but do not know the exact address that is first in the http ports file setting 'http.host', '127.0.0.1' diff --git a/x-pack/qa/multi-cluster-search-security/build.gradle b/x-pack/qa/multi-cluster-search-security/build.gradle index 4efcbdae8836b..3f3d03a7e685e 100644 --- a/x-pack/qa/multi-cluster-search-security/build.gradle +++ b/x-pack/qa/multi-cluster-search-security/build.gradle @@ -15,7 +15,7 @@ task 'remote-cluster'(type: RestIntegTestTask) { } testClusters.'remote-cluster' { - distribution = "DEFAULT" + testDistribution = 'DEFAULT' numberOfNodes = 2 setting 'cluster.remote.connect', "false" setting 'xpack.ilm.enabled', 'false' @@ -37,7 +37,7 @@ task
'mixed-cluster'(type: RestIntegTestTask) { } testClusters.'mixed-cluster' { - distribution = "DEFAULT" + testDistribution = 'DEFAULT' setting 'xpack.security.enabled', 'true' setting 'xpack.watcher.enabled', 'false' setting 'xpack.monitoring.enabled', 'false' diff --git a/x-pack/qa/multi-node/build.gradle b/x-pack/qa/multi-node/build.gradle index 5bf248482aa7f..331ae0625271a 100644 --- a/x-pack/qa/multi-node/build.gradle +++ b/x-pack/qa/multi-node/build.gradle @@ -7,7 +7,7 @@ dependencies { } testClusters.integTest { - distribution = 'DEFAULT' + testDistribution = 'DEFAULT' numberOfNodes = 2 setting 'xpack.ilm.enabled', 'false' setting 'xpack.security.enabled', 'true' diff --git a/x-pack/qa/oidc-op-tests/build.gradle b/x-pack/qa/oidc-op-tests/build.gradle index 32a77d2b98cb3..0ff52b7d3c5cc 100644 --- a/x-pack/qa/oidc-op-tests/build.gradle +++ b/x-pack/qa/oidc-op-tests/build.gradle @@ -30,7 +30,7 @@ integTest.runner { } testClusters.integTest { - distribution = 'DEFAULT' + testDistribution = 'DEFAULT' setting 'xpack.license.self_generated.type', 'trial' setting 'xpack.security.enabled', 'true' setting 'xpack.security.http.ssl.enabled', 'false' diff --git a/x-pack/qa/reindex-tests-with-security/build.gradle b/x-pack/qa/reindex-tests-with-security/build.gradle index 1cedd1f5bf6c6..658dffe54a8c3 100644 --- a/x-pack/qa/reindex-tests-with-security/build.gradle +++ b/x-pack/qa/reindex-tests-with-security/build.gradle @@ -22,7 +22,7 @@ forbiddenPatterns { File caFile = project.file('src/test/resources/ssl/ca.p12') testClusters.integTest { - distribution = "DEFAULT" + testDistribution = 'DEFAULT' // Whitelist reindexing from the local node so we can test it. extraConfigFile 'http.key', file('src/test/resources/ssl/http.key') extraConfigFile 'http.crt', file('src/test/resources/ssl/http.crt') diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/80_data_frame_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/80_data_frame_jobs_crud.yml index 9454423e98953..0980924569583 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/80_data_frame_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/80_data_frame_jobs_crud.yml @@ -1,5 +1,8 @@ --- "Test put batch data frame transforms on mixed cluster": + - skip: + version: "7.4.0 - " + reason: waiting backport of https://github.com/elastic/elasticsearch/pull/44590 - do: cluster.health: index: "dataframe-transform-airline-data" @@ -108,6 +111,9 @@ --- "Test put continuous data frame transform on mixed cluster": + - skip: + version: "7.4.0 - " + reason: waiting backport of https://github.com/elastic/elasticsearch/pull/44590 - do: cluster.health: index: "dataframe-transform-airline-data-cont" @@ -171,6 +177,9 @@ --- "Test GET, start, and stop old cluster batch transforms": + - skip: + version: "7.4.0 - " + reason: waiting backport of https://github.com/elastic/elasticsearch/pull/44590 - do: cluster.health: index: "dataframe-transform-airline-data" @@ -250,6 +259,9 @@ --- "Test GET, stop, start, old continuous transforms": + - skip: + version: "7.4.0 - " + reason: waiting backport of https://github.com/elastic/elasticsearch/pull/44590 - do: cluster.health: index: "dataframe-transform-airline-data-cont" diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/80_data_frame_jobs_crud.yml 
b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/80_data_frame_jobs_crud.yml index 7b666c2caa35f..025c6d30c94c6 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/80_data_frame_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/80_data_frame_jobs_crud.yml @@ -1,5 +1,8 @@ --- "Test put batch data frame transforms on old cluster": + - skip: + version: "7.4.0 - " + reason: waiting backport of https://github.com/elastic/elasticsearch/pull/44590 - do: indices.create: index: dataframe-transform-airline-data @@ -142,6 +145,9 @@ --- "Test put continuous data frame transform on old cluster": + - skip: + version: "7.4.0 - " + reason: waiting backport of https://github.com/elastic/elasticsearch/pull/44590 - do: indices.create: index: dataframe-transform-airline-data-cont diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/80_data_frame_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/80_data_frame_jobs_crud.yml index ea63950d7fc64..37a47bf0a34c5 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/80_data_frame_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/80_data_frame_jobs_crud.yml @@ -7,6 +7,9 @@ setup: timeout: 70s --- "Get start, stop, and delete old and mixed cluster batch data frame transforms": + - skip: + version: "7.4.0 - " + reason: waiting backport of https://github.com/elastic/elasticsearch/pull/44590 # Simple and complex OLD transforms - do: data_frame.get_data_frame_transform: @@ -166,6 +169,9 @@ setup: --- "Test GET, stop, delete, old and mixed continuous transforms": + - skip: + version: "7.4.0 - " + reason: waiting backport of https://github.com/elastic/elasticsearch/pull/44590 - do: data_frame.get_data_frame_transform: transform_id: "old-simple-continuous-transform" diff --git a/x-pack/qa/saml-idp-tests/build.gradle b/x-pack/qa/saml-idp-tests/build.gradle index 9e8c15cb94150..4b8f3df4159b5 100644 --- a/x-pack/qa/saml-idp-tests/build.gradle +++ b/x-pack/qa/saml-idp-tests/build.gradle @@ -42,7 +42,7 @@ task setupPorts { integTest.runner.dependsOn setupPorts testClusters.integTest { - distribution = "DEFAULT" + testDistribution = 'DEFAULT' setting 'xpack.license.self_generated.type', 'trial' setting 'xpack.security.enabled', 'true' setting 'xpack.security.http.ssl.enabled', 'false' diff --git a/x-pack/qa/security-example-spi-extension/build.gradle b/x-pack/qa/security-example-spi-extension/build.gradle index 311cf6c67d7f4..5f5b9f659c5fd 100644 --- a/x-pack/qa/security-example-spi-extension/build.gradle +++ b/x-pack/qa/security-example-spi-extension/build.gradle @@ -22,7 +22,7 @@ testClusters.integTest { // This is important, so that all the modules are available too. // There are index templates that use token filters that are in analysis-module and // processors are being used that are in ingest-common module. 
- distribution = 'default' + testDistribution = 'DEFAULT' setting 'xpack.security.authc.realms.custom.custom.order', '0' setting 'xpack.security.authc.realms.custom.custom.filtered_setting', 'should be filtered' diff --git a/x-pack/qa/security-setup-password-tests/build.gradle b/x-pack/qa/security-setup-password-tests/build.gradle index ee77aab06bb99..291c0abd3fa09 100644 --- a/x-pack/qa/security-setup-password-tests/build.gradle +++ b/x-pack/qa/security-setup-password-tests/build.gradle @@ -17,7 +17,7 @@ integTest.runner { } testClusters.integTest { - distribution = "DEFAULT" + testDistribution = 'DEFAULT' setting 'xpack.security.enabled', 'true' setting 'xpack.license.self_generated.type', 'trial' diff --git a/x-pack/qa/smoke-test-monitoring-with-watcher/build.gradle b/x-pack/qa/smoke-test-monitoring-with-watcher/build.gradle index 9bb5d5899db6a..d380eb308f62f 100644 --- a/x-pack/qa/smoke-test-monitoring-with-watcher/build.gradle +++ b/x-pack/qa/smoke-test-monitoring-with-watcher/build.gradle @@ -7,7 +7,7 @@ dependencies { } testClusters.integTest { - distribution = "DEFAULT" + testDistribution = 'DEFAULT' setting 'xpack.ilm.enabled', 'false' setting 'xpack.monitoring.enabled', 'true' setting 'xpack.watcher.enabled', 'true' diff --git a/x-pack/qa/smoke-test-plugins-ssl/build.gradle b/x-pack/qa/smoke-test-plugins-ssl/build.gradle index 78c0356c5c2a4..d8e61645f52ff 100644 --- a/x-pack/qa/smoke-test-plugins-ssl/build.gradle +++ b/x-pack/qa/smoke-test-plugins-ssl/build.gradle @@ -43,7 +43,7 @@ integTest.runner.dependsOn(copyKeyCerts) def pluginsCount = 0 testClusters.integTest { - distribution = "DEFAULT" + testDistribution = 'DEFAULT' setting 'xpack.monitoring.collection.interval', '1s' setting 'xpack.monitoring.exporters._http.type', 'http' setting 'xpack.monitoring.exporters._http.enabled', 'false' diff --git a/x-pack/qa/smoke-test-plugins/build.gradle b/x-pack/qa/smoke-test-plugins/build.gradle index 86fb4ee0f07d6..ded92bea09e72 100644 --- a/x-pack/qa/smoke-test-plugins/build.gradle +++ b/x-pack/qa/smoke-test-plugins/build.gradle @@ -10,7 +10,7 @@ dependencies { int pluginsCount = 0 testClusters.integTest { - distribution = "DEFAULT" + testDistribution = 'DEFAULT' setting 'xpack.security.enabled', 'true' setting 'xpack.license.self_generated.type', 'trial' user username: "test_user", password: "x-pack-test-password" diff --git a/x-pack/qa/smoke-test-security-with-mustache/build.gradle b/x-pack/qa/smoke-test-security-with-mustache/build.gradle index 3edf8d22cbe1d..748252044c36c 100644 --- a/x-pack/qa/smoke-test-security-with-mustache/build.gradle +++ b/x-pack/qa/smoke-test-security-with-mustache/build.gradle @@ -7,7 +7,7 @@ dependencies { } testClusters.integTest { - distribution = "DEFAULT" + testDistribution = 'DEFAULT' setting 'xpack.watcher.enabled', 'false' setting 'xpack.monitoring.enabled', 'false' setting 'xpack.security.enabled', 'true' diff --git a/x-pack/qa/smoke-test-watcher-with-security/build.gradle b/x-pack/qa/smoke-test-watcher-with-security/build.gradle index d24c52b8d9005..9d9244821a3c0 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/build.gradle +++ b/x-pack/qa/smoke-test-watcher-with-security/build.gradle @@ -15,7 +15,7 @@ task copyWatcherRestTests(type: Copy) { integTest.runner.dependsOn copyWatcherRestTests testClusters.integTest { - distribution = "DEFAULT" + testDistribution = 'DEFAULT' setting 'xpack.ilm.enabled', 'false' setting 'xpack.monitoring.enabled', 'false' setting 'xpack.ml.enabled', 'false' diff --git a/x-pack/qa/smoke-test-watcher/build.gradle 
b/x-pack/qa/smoke-test-watcher/build.gradle index d4ccba66f4966..7bc68fee9f499 100644 --- a/x-pack/qa/smoke-test-watcher/build.gradle +++ b/x-pack/qa/smoke-test-watcher/build.gradle @@ -7,7 +7,7 @@ dependencies { } testClusters.integTest { - distribution = "DEFAULT" + testDistribution = 'DEFAULT' setting 'xpack.ilm.enabled', 'false' setting 'xpack.security.enabled', 'false' setting 'xpack.monitoring.enabled', 'false' diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/10_basic.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/10_basic.yml index 6ec0adcc93310..7461c85b79e73 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/10_basic.yml +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/10_basic.yml @@ -123,6 +123,10 @@ --- "Test execute watch api with rest_total_hits_as_int": + - skip: + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/43889" + - do: cluster.health: wait_for_status: green diff --git a/x-pack/snapshot-tool/build.gradle b/x-pack/snapshot-tool/build.gradle new file mode 100644 index 0000000000000..76aea54cb81f5 --- /dev/null +++ b/x-pack/snapshot-tool/build.gradle @@ -0,0 +1,159 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +import java.nio.file.Files +import java.nio.file.Paths +import org.elasticsearch.gradle.ElasticsearchDistribution + +apply plugin: 'elasticsearch.build' +apply plugin: 'elasticsearch.test.fixtures' + +dependencies { + compile project(":server") + compile project(":libs:elasticsearch-cli") + compile "com.amazonaws:aws-java-sdk-s3:${versions.aws}" + compile "com.amazonaws:aws-java-sdk-core:${versions.aws}" + compile "com.amazonaws:jmespath-java:${versions.aws}" + compile "org.apache.httpcomponents:httpclient:${versions.httpclient}" + compile "org.apache.httpcomponents:httpcore:${versions.httpcore}" + compile "commons-logging:commons-logging:${versions.commonslogging}" + compile "commons-codec:commons-codec:${versions.commonscodec}" + compile "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" + compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" + compile 'com.fasterxml.jackson.core:jackson-databind:2.8.11.3' + compile "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" + testCompile project(":test:framework") + testCompile project(":plugins:repository-s3") + + // HACK: javax.xml.bind was removed from default modules in java 9, so we pull the api in here, + // and whitelist this hack in JarHell + compile 'javax.xml.bind:jaxb-api:2.2.2' +} + +dependencyLicenses { + mapping from: /aws-java-sdk-.*/, to: 'aws-java-sdk' + mapping from: /jmespath-java.*/, to: 'aws-java-sdk' + mapping from: /jackson-.*/, to: 'jackson' + mapping from: /jaxb-.*/, to: 'jaxb' +} + +test { + // these are tested explicitly in separate test tasks + exclude '**/S3CleanupTests.class' +} + +// Disabled to quiet the testing convention check since we only run third party tests +test.enabled = false + +String s3PermanentAccessKey = 's3_integration_test_permanent_access_key' +String s3PermanentSecretKey = 's3_integration_test_permanent_secret_key' +String s3PermanentBucket = 'permanent-bucket-test' +String s3PermanentBasePath = 'integration_test' + +task thirdPartyTest(type: Test) { 
+ include '**/S3CleanupTests.class' + systemProperty 'tests.security.manager', 'false' + systemProperty 'test.s3.account', s3PermanentAccessKey + systemProperty 'test.s3.key', s3PermanentSecretKey + systemProperty 'test.s3.bucket', s3PermanentBucket + systemProperty 'test.s3.base', s3PermanentBasePath +} + +task writeDockerFile { + File minioDockerfile = new File("${project.buildDir}/minio-docker/Dockerfile") + outputs.file(minioDockerfile) + doLast { + minioDockerfile.parentFile.mkdirs() + minioDockerfile.text = + "FROM minio/minio:RELEASE.2019-01-23T23-18-58Z\n" + + "RUN mkdir -p /minio/data/${s3PermanentBucket}\n" + + "ENV MINIO_ACCESS_KEY ${s3PermanentAccessKey}\n" + + "ENV MINIO_SECRET_KEY ${s3PermanentSecretKey}" + } +} + +preProcessFixture { + dependsOn(writeDockerFile) +} + +def minioAddress = { + int minioPort = postProcessFixture.ext."test.fixtures.minio-fixture.tcp.9000" + assert minioPort > 0 + 'http://127.0.0.1:' + minioPort +} + +thirdPartyTest { + dependsOn tasks.postProcessFixture + nonInputProperties.systemProperty 'test.s3.endpoint', "${ -> minioAddress.call() }" +} + +task unpackArchive(dependsOn: tasks.assemble, type: Copy) { + from tarTree("${project.buildDir}/snapshot-tool-${project.version}.tgz") + into "${project.buildDir}" +} + +task smokeTest(type: Exec) { + dependsOn unpackArchive + onlyIf { ElasticsearchDistribution.CURRENT_PLATFORM != ElasticsearchDistribution.Platform.WINDOWS } + commandLine "${project.buildDir}/snapshot-tool-${project.version}/bin/elasticsearch-snapshot", "-h" +} + +check.dependsOn(thirdPartyTest) +check.dependsOn(smokeTest) + +def vendorPath = Paths.get("${project.buildDir}/libs/vendor") + +task copyRuntimeLibs(type: Copy) { + doFirst { + Files.createDirectories(vendorPath) + } + into vendorPath.toString() + from configurations.compile, configurations.runtime +} + +task buildTarArchive(dependsOn: copyRuntimeLibs, type: Tar) { + compression Compression.GZIP + archiveBaseName.set('snapshot-tool') + destinationDirectory.set(project.buildDir) + into "snapshot-tool-${version}", { + into "bin", { + from file("${project.projectDir}/src/bin") + } + into "libs", { + from jar.getArchiveFile() + from file("${project.buildDir}/libs") + } + } +} + +assemble.dependsOn(buildTarArchive) + +thirdPartyAudit.ignoreMissingClasses ( + // classes are missing + 'javax.activation.DataHandler', + 'javax.servlet.ServletContextEvent', + 'javax.servlet.ServletContextListener', + 'org.apache.avalon.framework.logger.Logger', + 'org.apache.log.Hierarchy', + 'org.apache.log.Logger', + 'software.amazon.ion.IonReader', + 'software.amazon.ion.IonSystem', + 'software.amazon.ion.IonType', + 'software.amazon.ion.IonWriter', + 'software.amazon.ion.Timestamp', + 'software.amazon.ion.system.IonBinaryWriterBuilder', + 'software.amazon.ion.system.IonSystemBuilder', + 'software.amazon.ion.system.IonTextWriterBuilder', + 'software.amazon.ion.system.IonWriterBuilder', + // We don't use the kms dependency + 'com.amazonaws.services.kms.AWSKMS', + 'com.amazonaws.services.kms.AWSKMSClient', + 'com.amazonaws.services.kms.model.DecryptRequest', + 'com.amazonaws.services.kms.model.DecryptResult', + 'com.amazonaws.services.kms.model.EncryptRequest', + 'com.amazonaws.services.kms.model.EncryptResult', + 'com.amazonaws.services.kms.model.GenerateDataKeyRequest', + 'com.amazonaws.services.kms.model.GenerateDataKeyResult' +) diff --git a/x-pack/snapshot-tool/docker-compose.yml b/x-pack/snapshot-tool/docker-compose.yml new file mode 100644 index 0000000000000..e44750550e271 --- /dev/null +++ 
b/x-pack/snapshot-tool/docker-compose.yml @@ -0,0 +1,9 @@ +version: '3' +services: + minio-fixture: + build: + context: ./build/minio-docker + dockerfile: Dockerfile + ports: + - "9000" + command: ["server", "/minio/data"] \ No newline at end of file diff --git a/x-pack/snapshot-tool/licenses/aws-java-sdk-LICENSE.txt b/x-pack/snapshot-tool/licenses/aws-java-sdk-LICENSE.txt new file mode 100644 index 0000000000000..98d1f9319f374 --- /dev/null +++ b/x-pack/snapshot-tool/licenses/aws-java-sdk-LICENSE.txt @@ -0,0 +1,63 @@ +Apache License +Version 2.0, January 2004 + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. + +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." 
+ +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: + + 1. You must give any other recipients of the Work or Derivative Works a copy of this License; and + 2. You must cause any modified files to carry prominent notices stating that You changed the files; and + 3. You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and + 4. If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. + +You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. + +5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +Note: Other license terms may apply to certain, identified software files contained within or distributed with the accompanying software if such terms are included in the directory containing the accompanying software. Such other license terms will then apply in lieu of the terms of the software license above. 
+ +JSON processing code subject to the JSON License from JSON.org: + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +The Software shall be used for Good, not Evil. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/x-pack/snapshot-tool/licenses/aws-java-sdk-NOTICE.txt b/x-pack/snapshot-tool/licenses/aws-java-sdk-NOTICE.txt new file mode 100644 index 0000000000000..565bd6085c71a --- /dev/null +++ b/x-pack/snapshot-tool/licenses/aws-java-sdk-NOTICE.txt @@ -0,0 +1,15 @@ +AWS SDK for Java +Copyright 2010-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +This product includes software developed by +Amazon Technologies, Inc (http://www.amazon.com/). + +********************** +THIRD PARTY COMPONENTS +********************** +This software includes third party software subject to the following copyrights: +- XML parsing and utility functions from JetS3t - Copyright 2006-2009 James Murty. +- JSON parsing and utility functions from JSON.org - Copyright 2002 JSON.org. +- PKCS#1 PEM encoded private key parsing and utility functions from oauth.googlecode.com - Copyright 1998-2010 AOL Inc. 
+ +The licenses for these third party components are included in LICENSE.txt diff --git a/x-pack/snapshot-tool/licenses/aws-java-sdk-core-1.11.562.jar.sha1 b/x-pack/snapshot-tool/licenses/aws-java-sdk-core-1.11.562.jar.sha1 new file mode 100644 index 0000000000000..ed8ded6a3608c --- /dev/null +++ b/x-pack/snapshot-tool/licenses/aws-java-sdk-core-1.11.562.jar.sha1 @@ -0,0 +1 @@ +b5fc47ec1b5afe180f5ebb4eda755acdca7a20ae \ No newline at end of file diff --git a/x-pack/snapshot-tool/licenses/aws-java-sdk-s3-1.11.562.jar.sha1 b/x-pack/snapshot-tool/licenses/aws-java-sdk-s3-1.11.562.jar.sha1 new file mode 100644 index 0000000000000..8e852fe9b275f --- /dev/null +++ b/x-pack/snapshot-tool/licenses/aws-java-sdk-s3-1.11.562.jar.sha1 @@ -0,0 +1 @@ +1712c878f7e9483ceac1eb2356a9457a3c8df03e \ No newline at end of file diff --git a/x-pack/snapshot-tool/licenses/commons-codec-1.11.jar.sha1 b/x-pack/snapshot-tool/licenses/commons-codec-1.11.jar.sha1 new file mode 100644 index 0000000000000..b08f71a5babf0 --- /dev/null +++ b/x-pack/snapshot-tool/licenses/commons-codec-1.11.jar.sha1 @@ -0,0 +1 @@ +3acb4705652e16236558f0f4f2192cc33c3bd189 \ No newline at end of file diff --git a/x-pack/snapshot-tool/licenses/commons-codec-LICENSE.txt b/x-pack/snapshot-tool/licenses/commons-codec-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/x-pack/snapshot-tool/licenses/commons-codec-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/x-pack/snapshot-tool/licenses/commons-codec-NOTICE.txt b/x-pack/snapshot-tool/licenses/commons-codec-NOTICE.txt new file mode 100644 index 0000000000000..56916449bbe10 --- /dev/null +++ b/x-pack/snapshot-tool/licenses/commons-codec-NOTICE.txt @@ -0,0 +1,17 @@ +Apache Commons Codec +Copyright 2002-2015 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +src/test/org/apache/commons/codec/language/DoubleMetaphoneTest.java +contains test data from http://aspell.net/test/orig/batch0.tab. +Copyright (C) 2002 Kevin Atkinson (kevina@gnu.org) + +=============================================================================== + +The content of package org.apache.commons.codec.language.bm has been translated +from the original php source code available at http://stevemorse.org/phoneticinfo.htm +with permission from the original authors. 
+Original source copyright: +Copyright (c) 2008 Alexander Beider & Stephen P. Morse. diff --git a/x-pack/snapshot-tool/licenses/commons-logging-1.1.3.jar.sha1 b/x-pack/snapshot-tool/licenses/commons-logging-1.1.3.jar.sha1 new file mode 100644 index 0000000000000..5b8f029e58293 --- /dev/null +++ b/x-pack/snapshot-tool/licenses/commons-logging-1.1.3.jar.sha1 @@ -0,0 +1 @@ +f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f \ No newline at end of file diff --git a/x-pack/snapshot-tool/licenses/commons-logging-LICENSE.txt b/x-pack/snapshot-tool/licenses/commons-logging-LICENSE.txt new file mode 100644 index 0000000000000..57bc88a15a0ee --- /dev/null +++ b/x-pack/snapshot-tool/licenses/commons-logging-LICENSE.txt @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
diff --git a/x-pack/snapshot-tool/licenses/commons-logging-NOTICE.txt b/x-pack/snapshot-tool/licenses/commons-logging-NOTICE.txt
new file mode 100644
index 0000000000000..72eb32a902458
--- /dev/null
+++ b/x-pack/snapshot-tool/licenses/commons-logging-NOTICE.txt
@@ -0,0 +1,5 @@
+Apache Commons Logging
+Copyright 2001-2009 The Apache Software Foundation
+
+This product includes software developed by
+The Apache Software Foundation (http://www.apache.org/).
diff --git a/x-pack/snapshot-tool/licenses/httpclient-4.5.8.jar.sha1 b/x-pack/snapshot-tool/licenses/httpclient-4.5.8.jar.sha1
new file mode 100644
index 0000000000000..73f0d30c7093b
--- /dev/null
+++ b/x-pack/snapshot-tool/licenses/httpclient-4.5.8.jar.sha1
@@ -0,0 +1 @@
+c27c9d6f15435dc2b6947112027b418b0eef32b9
\ No newline at end of file
diff --git a/x-pack/snapshot-tool/licenses/httpclient-LICENSE.txt b/x-pack/snapshot-tool/licenses/httpclient-LICENSE.txt
new file mode 100644
index 0000000000000..32f01eda18fe9
--- /dev/null
+++ b/x-pack/snapshot-tool/licenses/httpclient-LICENSE.txt
@@ -0,0 +1,558 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+=========================================================================
+
+This project includes Public Suffix List copied from
+<https://publicsuffix.org/list/effective_tld_names.dat>
+licensed under the terms of the Mozilla Public License, v. 2.0
+
+Full license text:
+
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+    means each individual or legal entity that creates, contributes to
+    the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+    means the combination of the Contributions of others (if any) used
+    by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+    means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+    means Source Code Form to which the initial Contributor has attached
+    the notice in Exhibit A, the Executable Form of such Source Code
+    Form, and Modifications of such Source Code Form, in each case
+    including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+    means
+
+    (a) that the initial Contributor has attached the notice described
+        in Exhibit B to the Covered Software; or
+
+    (b) that the Covered Software was made available under the terms of
+        version 1.1 or earlier of the License, but not also under the
+        terms of a Secondary License.
+
+1.6. "Executable Form"
+    means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+    means a work that combines Covered Software with other material, in
+    a separate file or files, that is not Covered Software.
+
+1.8. "License"
+    means this document.
+
+1.9. "Licensable"
+    means having the right to grant, to the maximum extent possible,
+    whether at the time of the initial grant or subsequently, any and
+    all of the rights conveyed by this License.
+
+1.10. "Modifications"
+    means any of the following:
+
+    (a) any file in Source Code Form that results from an addition to,
+        deletion from, or modification of the contents of Covered
+        Software; or
+
+    (b) any new file in Source Code Form that contains any Covered
+        Software.
+
+1.11.
"Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. 
+ +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. 
Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/x-pack/snapshot-tool/licenses/httpclient-NOTICE.txt b/x-pack/snapshot-tool/licenses/httpclient-NOTICE.txt new file mode 100644 index 0000000000000..4f6058178b201 --- /dev/null +++ b/x-pack/snapshot-tool/licenses/httpclient-NOTICE.txt @@ -0,0 +1,5 @@ +Apache HttpComponents Client +Copyright 1999-2015 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). diff --git a/x-pack/snapshot-tool/licenses/httpcore-4.4.11.jar.sha1 b/x-pack/snapshot-tool/licenses/httpcore-4.4.11.jar.sha1 new file mode 100644 index 0000000000000..6d64372bfccd8 --- /dev/null +++ b/x-pack/snapshot-tool/licenses/httpcore-4.4.11.jar.sha1 @@ -0,0 +1 @@ +de748cf874e4e193b42eceea9fe5574fabb9d4df \ No newline at end of file diff --git a/x-pack/snapshot-tool/licenses/httpcore-LICENSE.txt b/x-pack/snapshot-tool/licenses/httpcore-LICENSE.txt new file mode 100644 index 0000000000000..72819a9f06f2a --- /dev/null +++ b/x-pack/snapshot-tool/licenses/httpcore-LICENSE.txt @@ -0,0 +1,241 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +========================================================================= + +This project contains annotations in the package org.apache.http.annotation +which are derived from JCIP-ANNOTATIONS +Copyright (c) 2005 Brian Goetz and Tim Peierls. +See http://www.jcip.net and the Creative Commons Attribution License +(http://creativecommons.org/licenses/by/2.5) +Full text: http://creativecommons.org/licenses/by/2.5/legalcode + +License + +THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED. + +BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE TO BE BOUND BY THE TERMS OF THIS LICENSE. THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND CONDITIONS. + +1. Definitions + + "Collective Work" means a work, such as a periodical issue, anthology or encyclopedia, in which the Work in its entirety in unmodified form, along with a number of other contributions, constituting separate and independent works in themselves, are assembled into a collective whole. A work that constitutes a Collective Work will not be considered a Derivative Work (as defined below) for the purposes of this License. + "Derivative Work" means a work based upon the Work or upon the Work and other pre-existing works, such as a translation, musical arrangement, dramatization, fictionalization, motion picture version, sound recording, art reproduction, abridgment, condensation, or any other form in which the Work may be recast, transformed, or adapted, except that a work that constitutes a Collective Work will not be considered a Derivative Work for the purpose of this License. For the avoidance of doubt, where the Work is a musical composition or sound recording, the synchronization of the Work in timed-relation with a moving image ("synching") will be considered a Derivative Work for the purpose of this License. 
+ "Licensor" means the individual or entity that offers the Work under the terms of this License. + "Original Author" means the individual or entity who created the Work. + "Work" means the copyrightable work of authorship offered under the terms of this License. + "You" means an individual or entity exercising rights under this License who has not previously violated the terms of this License with respect to the Work, or who has received express permission from the Licensor to exercise rights under this License despite a previous violation. + +2. Fair Use Rights. Nothing in this license is intended to reduce, limit, or restrict any rights arising from fair use, first sale or other limitations on the exclusive rights of the copyright owner under copyright law or other applicable laws. + +3. License Grant. Subject to the terms and conditions of this License, Licensor hereby grants You a worldwide, royalty-free, non-exclusive, perpetual (for the duration of the applicable copyright) license to exercise the rights in the Work as stated below: + + to reproduce the Work, to incorporate the Work into one or more Collective Works, and to reproduce the Work as incorporated in the Collective Works; + to create and reproduce Derivative Works; + to distribute copies or phonorecords of, display publicly, perform publicly, and perform publicly by means of a digital audio transmission the Work including as incorporated in Collective Works; + to distribute copies or phonorecords of, display publicly, perform publicly, and perform publicly by means of a digital audio transmission Derivative Works. + + For the avoidance of doubt, where the work is a musical composition: + Performance Royalties Under Blanket Licenses. Licensor waives the exclusive right to collect, whether individually or via a performance rights society (e.g. ASCAP, BMI, SESAC), royalties for the public performance or public digital performance (e.g. webcast) of the Work. + Mechanical Rights and Statutory Royalties. Licensor waives the exclusive right to collect, whether individually or via a music rights agency or designated agent (e.g. Harry Fox Agency), royalties for any phonorecord You create from the Work ("cover version") and distribute, subject to the compulsory license created by 17 USC Section 115 of the US Copyright Act (or the equivalent in other jurisdictions). + Webcasting Rights and Statutory Royalties. For the avoidance of doubt, where the Work is a sound recording, Licensor waives the exclusive right to collect, whether individually or via a performance-rights society (e.g. SoundExchange), royalties for the public digital performance (e.g. webcast) of the Work, subject to the compulsory license created by 17 USC Section 114 of the US Copyright Act (or the equivalent in other jurisdictions). + +The above rights may be exercised in all media and formats whether now known or hereafter devised. The above rights include the right to make such modifications as are technically necessary to exercise the rights in other media and formats. All rights not expressly granted by Licensor are hereby reserved. + +4. 
Restrictions.The license granted in Section 3 above is expressly made subject to and limited by the following restrictions: + + You may distribute, publicly display, publicly perform, or publicly digitally perform the Work only under the terms of this License, and You must include a copy of, or the Uniform Resource Identifier for, this License with every copy or phonorecord of the Work You distribute, publicly display, publicly perform, or publicly digitally perform. You may not offer or impose any terms on the Work that alter or restrict the terms of this License or the recipients' exercise of the rights granted hereunder. You may not sublicense the Work. You must keep intact all notices that refer to this License and to the disclaimer of warranties. You may not distribute, publicly display, publicly perform, or publicly digitally perform the Work with any technological measures that control access or use of the Work in a manner inconsistent with the terms of this License Agreement. The above applies to the Work as incorporated in a Collective Work, but this does not require the Collective Work apart from the Work itself to be made subject to the terms of this License. If You create a Collective Work, upon notice from any Licensor You must, to the extent practicable, remove from the Collective Work any credit as required by clause 4(b), as requested. If You create a Derivative Work, upon notice from any Licensor You must, to the extent practicable, remove from the Derivative Work any credit as required by clause 4(b), as requested. + If you distribute, publicly display, publicly perform, or publicly digitally perform the Work or any Derivative Works or Collective Works, You must keep intact all copyright notices for the Work and provide, reasonable to the medium or means You are utilizing: (i) the name of the Original Author (or pseudonym, if applicable) if supplied, and/or (ii) if the Original Author and/or Licensor designate another party or parties (e.g. a sponsor institute, publishing entity, journal) for attribution in Licensor's copyright notice, terms of service or by other reasonable means, the name of such party or parties; the title of the Work if supplied; to the extent reasonably practicable, the Uniform Resource Identifier, if any, that Licensor specifies to be associated with the Work, unless such URI does not refer to the copyright notice or licensing information for the Work; and in the case of a Derivative Work, a credit identifying the use of the Work in the Derivative Work (e.g., "French translation of the Work by Original Author," or "Screenplay based on original Work by Original Author"). Such credit may be implemented in any reasonable manner; provided, however, that in the case of a Derivative Work or Collective Work, at a minimum such credit will appear where any other comparable authorship credit appears and in a manner at least as prominent as such other comparable authorship credit. + +5. Representations, Warranties and Disclaimer + +UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR OFFERS THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTIBILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS, WHETHER OR NOT DISCOVERABLE. 
SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU. + +6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE LAW, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +7. Termination + + This License and the rights granted hereunder will terminate automatically upon any breach by You of the terms of this License. Individuals or entities who have received Derivative Works or Collective Works from You under this License, however, will not have their licenses terminated provided such individuals or entities remain in full compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will survive any termination of this License. + Subject to the above terms and conditions, the license granted here is perpetual (for the duration of the applicable copyright in the Work). Notwithstanding the above, Licensor reserves the right to release the Work under different license terms or to stop distributing the Work at any time; provided, however that any such election will not serve to withdraw this License (or any other license that has been, or is required to be, granted under the terms of this License), and this License will continue in full force and effect unless terminated as stated above. + +8. Miscellaneous + + Each time You distribute or publicly digitally perform the Work or a Collective Work, the Licensor offers to the recipient a license to the Work on the same terms and conditions as the license granted to You under this License. + Each time You distribute or publicly digitally perform a Derivative Work, Licensor offers to the recipient a license to the original Work on the same terms and conditions as the license granted to You under this License. + If any provision of this License is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this License, and without further action by the parties to this agreement, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. + No term or provision of this License shall be deemed waived and no breach consented to unless such waiver or consent shall be in writing and signed by the party to be charged with such waiver or consent. + This License constitutes the entire agreement between the parties with respect to the Work licensed here. There are no understandings, agreements or representations with respect to the Work not specified here. Licensor shall not be bound by any additional provisions that may appear in any communication from You. This License may not be modified without the mutual written agreement of the Licensor and You. diff --git a/x-pack/snapshot-tool/licenses/httpcore-NOTICE.txt b/x-pack/snapshot-tool/licenses/httpcore-NOTICE.txt new file mode 100644 index 0000000000000..c0be50a505ec1 --- /dev/null +++ b/x-pack/snapshot-tool/licenses/httpcore-NOTICE.txt @@ -0,0 +1,8 @@ +Apache HttpComponents Core +Copyright 2005-2014 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +This project contains annotations derived from JCIP-ANNOTATIONS +Copyright (c) 2005 Brian Goetz and Tim Peierls. 
See http://www.jcip.net
diff --git a/x-pack/snapshot-tool/licenses/jackson-LICENSE b/x-pack/snapshot-tool/licenses/jackson-LICENSE
new file mode 100644
index 0000000000000..f5f45d26a49d6
--- /dev/null
+++ b/x-pack/snapshot-tool/licenses/jackson-LICENSE
@@ -0,0 +1,8 @@
+This copy of Jackson JSON processor streaming parser/generator is licensed under the
+Apache (Software) License, version 2.0 ("the License").
+See the License for details about distribution rights, and the
+specific rights regarding derivate works.
+
+You may obtain a copy of the License at:
+
+http://www.apache.org/licenses/LICENSE-2.0
diff --git a/x-pack/snapshot-tool/licenses/jackson-NOTICE b/x-pack/snapshot-tool/licenses/jackson-NOTICE
new file mode 100644
index 0000000000000..4c976b7b4cc58
--- /dev/null
+++ b/x-pack/snapshot-tool/licenses/jackson-NOTICE
@@ -0,0 +1,20 @@
+# Jackson JSON processor
+
+Jackson is a high-performance, Free/Open Source JSON processing library.
+It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has
+been in development since 2007.
+It is currently developed by a community of developers, as well as supported
+commercially by FasterXML.com.
+
+## Licensing
+
+Jackson core and extension components may licensed under different licenses.
+To find the details that apply to this artifact see the accompanying LICENSE file.
+For more information, including possible other licensing options, contact
+FasterXML.com (http://fasterxml.com).
+
+## Credits
+
+A list of contributors may be found from CREDITS file, which is included
+in some artifacts (usually source distributions); but is always available
+from the source code management (SCM) system project uses.
diff --git a/x-pack/snapshot-tool/licenses/jackson-annotations-2.8.11.jar.sha1 b/x-pack/snapshot-tool/licenses/jackson-annotations-2.8.11.jar.sha1
new file mode 100644
index 0000000000000..30e7d1a7b1a74
--- /dev/null
+++ b/x-pack/snapshot-tool/licenses/jackson-annotations-2.8.11.jar.sha1
@@ -0,0 +1 @@
+391de20b4e29cb3fb07d2454ace64be2c82ac91f
\ No newline at end of file
diff --git a/x-pack/snapshot-tool/licenses/jackson-core-2.8.11.jar.sha1 b/x-pack/snapshot-tool/licenses/jackson-core-2.8.11.jar.sha1
new file mode 100644
index 0000000000000..e7ad1e74ed6b8
--- /dev/null
+++ b/x-pack/snapshot-tool/licenses/jackson-core-2.8.11.jar.sha1
@@ -0,0 +1 @@
+876ead1db19f0c9e79c9789273a3ef8c6fd6c29b
\ No newline at end of file
diff --git a/x-pack/snapshot-tool/licenses/jackson-databind-2.8.11.3.jar.sha1 b/x-pack/snapshot-tool/licenses/jackson-databind-2.8.11.3.jar.sha1
new file mode 100644
index 0000000000000..253a1361931c3
--- /dev/null
+++ b/x-pack/snapshot-tool/licenses/jackson-databind-2.8.11.3.jar.sha1
@@ -0,0 +1 @@
+844df5aba5a1a56e00905b165b12bb34116ee858
\ No newline at end of file
diff --git a/x-pack/snapshot-tool/licenses/jaxb-LICENSE.txt b/x-pack/snapshot-tool/licenses/jaxb-LICENSE.txt
new file mode 100644
index 0000000000000..a3e62b0878766
--- /dev/null
+++ b/x-pack/snapshot-tool/licenses/jaxb-LICENSE.txt
@@ -0,0 +1,705 @@
+COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0
+
+1. Definitions.
+
+   1.1. "Contributor" means each individual or entity that creates or
+contributes to the creation of Modifications.
+
+   1.2. "Contributor Version" means the combination of the Original
+Software, prior Modifications used by a Contributor (if any), and the
+Modifications made by that particular Contributor.
+
+   1.3. "Covered Software" means (a) the Original Software, or (b)
+Modifications, or (c) the combination of files containing Original
+Software with files containing Modifications, in each case including
+portions thereof.
+
+   1.4. "Executable" means the Covered Software in any form other than
+Source Code.
+
+   1.5. "Initial Developer" means the individual or entity that first
+makes Original Software available under this License.
+
+   1.6. "Larger Work" means a work which combines Covered Software or
+portions thereof with code not governed by the terms of this License.
+
+   1.7. "License" means this document.
+
+   1.8. "Licensable" means having the right to grant, to the maximum
+extent possible, whether at the time of the initial grant or
+subsequently acquired, any and all of the rights conveyed herein.
+
+   1.9. "Modifications" means the Source Code and Executable form of any
+of the following:
+
+      A. Any file that results from an addition to, deletion from or
+modification of the contents of a file containing Original Software or
+previous Modifications;
+
+      B. Any new file that contains any part of the Original Software
+or previous Modification; or
+
+      C. Any new file that is contributed or otherwise made available
+under the terms of this License.
+
+   1.10. "Original Software" means the Source Code and Executable form of
+computer software code that is originally released under this License.
+
+   1.11. "Patent Claims" means any patent claim(s), now owned or
+hereafter acquired, including without limitation, method, process, and
+apparatus claims, in any patent Licensable by grantor.
+
+   1.12. "Source Code" means (a) the common form of computer software
+code in which modifications are made and (b) associated documentation
+included in or with such code.
+
+   1.13. "You" (or "Your") means an individual or a legal entity
+exercising rights under, and complying with all of the terms of, this
+License. For legal entities, "You" includes any entity which controls,
+is controlled by, or is under common control with You. For purposes of
+this definition, "control" means (a) the power, direct or indirect, to
+cause the direction or management of such entity, whether by contract or
+otherwise, or (b) ownership of more than fifty percent (50%) of the
+outstanding shares or beneficial ownership of such entity.
+
+2. License Grants.
+
+   2.1. The Initial Developer Grant.
+
+   Conditioned upon Your compliance with Section 3.1 below and
+subject to third party intellectual property claims, the Initial
+Developer hereby grants You a world-wide, royalty-free, non-exclusive
+license:
+
+      (a) under intellectual property rights (other than patent or
+trademark) Licensable by Initial Developer, to use, reproduce, modify,
+display, perform, sublicense and distribute the Original Software (or
+portions thereof), with or without Modifications, and/or as part of a
+Larger Work; and
+
+      (b) under Patent Claims infringed by the making, using or
+selling of Original Software, to make, have made, use, practice, sell,
+and offer for sale, and/or otherwise dispose of the Original Software
+(or portions thereof).
+
+      (c) The licenses granted in Sections 2.1(a) and (b) are
+effective on the date Initial Developer first distributes or otherwise
+makes the Original Software available to a third party under the terms
+of this License.
+ + (d) Notwithstanding Section 2.1(b) above, no patent license is +granted: (1) for code that You delete from the Original Software, or (2) +for infringements caused by: (i) the modification of the Original +Software, or (ii) the combination of the Original Software with other +software or devices. + + 2.2. Contributor Grant. + + Conditioned upon Your compliance with Section 3.1 below and subject +to third party intellectual property claims, each Contributor hereby +grants You a world-wide, royalty-free, non-exclusive license: + + (a) under intellectual property rights (other than patent or +trademark) Licensable by Contributor to use, reproduce, modify, display, +perform, sublicense and distribute the Modifications created by such +Contributor (or portions thereof), either on an unmodified basis, with +other Modifications, as Covered Software and/or as part of a Larger Work; +and + + (b) under Patent Claims infringed by the making, using, or +selling of Modifications made by that Contributor either alone and/or in +combination with its Contributor Version (or portions of such +combination), to make, use, sell, offer for sale, have made, and/or +otherwise dispose of: (1) Modifications made by that Contributor (or +portions thereof); and (2) the combination of Modifications made by that +Contributor with its Contributor Version (or portions of such +combination). + + (c) The licenses granted in Sections 2.2(a) and 2.2(b) are +effective on the date Contributor first distributes or otherwise makes +the Modifications available to a third party. + + (d) Notwithstanding Section 2.2(b) above, no patent license is +granted: (1) for any code that Contributor has deleted from the +Contributor Version; (2) for infringements caused by: (i) third party +modifications of Contributor Version, or (ii) the combination of +Modifications made by that Contributor with other software (except as +part of the Contributor Version) or other devices; or (3) under Patent +Claims infringed by Covered Software in the absence of Modifications +made by that Contributor. + +3. Distribution Obligations. + + 3.1. Availability of Source Code. + Any Covered Software that You distribute or otherwise make +available in Executable form must also be made available in Source Code +form and that Source Code form must be distributed only under the terms +of this License. You must include a copy of this License with every copy +of the Source Code form of the Covered Software You distribute or +otherwise make available. You must inform recipients of any such Covered +Software in Executable form as to how they can obtain such Covered +Software in Source Code form in a reasonable manner on or through a +medium customarily used for software exchange. + + 3.2. Modifications. + The Modifications that You create or to which You contribute are +governed by the terms of this License. You represent that You believe +Your Modifications are Your original creation(s) and/or You have +sufficient rights to grant the rights conveyed by this License. + + 3.3. Required Notices. + You must include a notice in each of Your Modifications that +identifies You as the Contributor of the Modification. You may not +remove or alter any copyright, patent or trademark notices contained +within the Covered Software, or any notices of licensing or any +descriptive text giving attribution to any Contributor or the Initial +Developer. + + 3.4. Application of Additional Terms. 
+ You may not offer or impose any terms on any Covered Software in +Source Code form that alters or restricts the applicable version of this +License or the recipients. rights hereunder. You may choose to offer, +and to charge a fee for, warranty, support, indemnity or liability +obligations to one or more recipients of Covered Software. However, you +may do so only on Your own behalf, and not on behalf of the Initial +Developer or any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity or liability obligation is offered by +You alone, and You hereby agree to indemnify the Initial Developer and +every Contributor for any liability incurred by the Initial Developer or +such Contributor as a result of warranty, support, indemnity or +liability terms You offer. + + 3.5. Distribution of Executable Versions. + You may distribute the Executable form of the Covered Software +under the terms of this License or under the terms of a license of Your +choice, which may contain terms different from this License, provided +that You are in compliance with the terms of this License and that the +license for the Executable form does not attempt to limit or alter the +recipient.s rights in the Source Code form from the rights set forth in +this License. If You distribute the Covered Software in Executable form +under a different license, You must make it absolutely clear that any +terms which differ from this License are offered by You alone, not by +the Initial Developer or Contributor. You hereby agree to indemnify the +Initial Developer and every Contributor for any liability incurred by +the Initial Developer or such Contributor as a result of any such terms +You offer. + + 3.6. Larger Works. + You may create a Larger Work by combining Covered Software with +other code not governed by the terms of this License and distribute the +Larger Work as a single product. In such a case, You must make sure the +requirements of this License are fulfilled for the Covered Software. + +4. Versions of the License. + + 4.1. New Versions. + Sun Microsystems, Inc. is the initial license steward and may +publish revised and/or new versions of this License from time to time. +Each version will be given a distinguishing version number. Except as +provided in Section 4.3, no one other than the license steward has the +right to modify this License. + + 4.2. Effect of New Versions. + You may always continue to use, distribute or otherwise make the +Covered Software available under the terms of the version of the License +under which You originally received the Covered Software. If the Initial +Developer includes a notice in the Original Software prohibiting it from +being distributed or otherwise made available under any subsequent +version of the License, You must distribute and make the Covered +Software available under the terms of the version of the License under +which You originally received the Covered Software. Otherwise, You may +also choose to use, distribute or otherwise make the Covered Software +available under the terms of any subsequent version of the License +published by the license steward. + + 4.3. Modified Versions. 
+ When You are an Initial Developer and You want to create a new +license for Your Original Software, You may create and use a modified +version of this License if You: (a) rename the license and remove any +references to the name of the license steward (except to note that the +license differs from this License); and (b) otherwise make it clear that +the license contains terms which differ from this License. + +5. DISCLAIMER OF WARRANTY. + + COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN .AS IS. BASIS, +WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, +WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE IS FREE OF +DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. +THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED +SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE DEFECTIVE IN ANY +RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME +THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS +DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO +USE OF ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS +DISCLAIMER. + +6. TERMINATION. + + 6.1. This License and the rights granted hereunder will terminate +automatically if You fail to comply with terms herein and fail to cure +such breach within 30 days of becoming aware of the breach. Provisions +which, by their nature, must remain in effect beyond the termination of +this License shall survive. + + 6.2. If You assert a patent infringement claim (excluding +declaratory judgment actions) against Initial Developer or a Contributor +(the Initial Developer or Contributor against whom You assert such claim +is referred to as .Participant.) alleging that the Participant Software +(meaning the Contributor Version where the Participant is a Contributor +or the Original Software where the Participant is the Initial Developer) +directly or indirectly infringes any patent, then any and all rights +granted directly or indirectly to You by such Participant, the Initial +Developer (if the Initial Developer is not the Participant) and all +Contributors under Sections 2.1 and/or 2.2 of this License shall, upon +60 days notice from Participant terminate prospectively and +automatically at the expiration of such 60 day notice period, unless if +within such 60 day period You withdraw Your claim with respect to the +Participant Software against such Participant either unilaterally or +pursuant to a written agreement with Participant. + + 6.3. In the event of termination under Sections 6.1 or 6.2 above, +all end user licenses that have been validly granted by You or any +distributor hereunder prior to termination (excluding licenses granted +to You by any distributor) shall survive termination. + +7. LIMITATION OF LIABILITY. + + UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT +(INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL +DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED SOFTWARE, +OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY +INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER +INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF +GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL +OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN +INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. 
THIS LIMITATION OF +LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY +RESULTING FROM SUCH PARTY.S NEGLIGENCE TO THE EXTENT APPLICABLE LAW +PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION +OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION +AND LIMITATION MAY NOT APPLY TO YOU. + +8. U.S. GOVERNMENT END USERS. + + The Covered Software is a .commercial item,. as that term is defined +in 48 C.F.R. 2.101 (Oct. 1995), consisting of .commercial computer +software. (as that term is defined at 48 C.F.R. º 252.227-7014(a)(1)) +and .commercial computer software documentation. as such terms are used +in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and +48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government +End Users acquire Covered Software with only those rights set forth +herein. This U.S. Government Rights clause is in lieu of, and supersedes, +any other FAR, DFAR, or other clause or provision that addresses +Government rights in computer software under this License. + +9. MISCELLANEOUS. + + This License represents the complete agreement concerning subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. This License shall be governed by the +law of the jurisdiction specified in a notice contained within the +Original Software (except to the extent applicable law, if any, provides +otherwise), excluding such jurisdiction.s conflict-of-law provisions. +Any litigation relating to this License shall be subject to the +jurisdiction of the courts located in the jurisdiction and venue +specified in a notice contained within the Original Software, with the +losing party responsible for costs, including, without limitation, court +costs and reasonable attorneys. fees and expenses. The application of +the United Nations Convention on Contracts for the International Sale of +Goods is expressly excluded. Any law or regulation which provides that +the language of a contract shall be construed against the drafter shall +not apply to this License. You agree that You alone are responsible for +compliance with the United States export administration regulations (and +the export control laws and regulation of any other countries) when You +use, distribute or otherwise make available any Covered Software. + +10. RESPONSIBILITY FOR CLAIMS. + + As between Initial Developer and the Contributors, each party is +responsible for claims and damages arising, directly or indirectly, out +of its utilization of rights under this License and You agree to work +with Initial Developer and Contributors to distribute such +responsibility on an equitable basis. Nothing herein is intended or +shall be deemed to constitute any admission of liability. + + NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND +DISTRIBUTION LICENSE (CDDL) + + The code released under the CDDL shall be governed by the laws of the +State of California (excluding conflict-of-law provisions). Any +litigation relating to this License shall be subject to the jurisdiction +of the Federal Courts of the Northern District of California and the +state courts of the State of California, with venue lying in Santa Clara +County, California. + + +The GNU General Public License (GPL) Version 2, June 1991 + + +Copyright (C) 1989, 1991 Free Software Foundation, Inc. 
59 Temple Place, +Suite 330, Boston, MA 02111-1307 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +Preamble + +The licenses for most software are designed to take away your freedom to +share and change it. By contrast, the GNU General Public License is +intended to guarantee your freedom to share and change free software--to +make sure the software is free for all its users. This General Public +License applies to most of the Free Software Foundation's software and +to any other program whose authors commit to using it. (Some other Free +Software Foundation software is covered by the GNU Library General +Public License instead.) You can apply it to your programs, too. + +When we speak of free software, we are referring to freedom, not price. +Our General Public Licenses are designed to make sure that you have the +freedom to distribute copies of free software (and charge for this +service if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs; and that you know you can do these things. + +To protect your rights, we need to make restrictions that forbid anyone +to deny you these rights or to ask you to surrender the rights. These +restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + +For example, if you distribute copies of such a program, whether gratis +or for a fee, you must give the recipients all the rights that you have. +You must make sure that they, too, receive or can get the source code. +And you must show them these terms so they know their rights. + +We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + +Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + +Finally, any free program is threatened constantly by software patents. +We wish to avoid the danger that redistributors of a free program will +individually obtain patent licenses, in effect making the program +proprietary. To prevent this, we have made it clear that any patent must +be licensed for everyone's free use or not licensed at all. + +The precise terms and conditions for copying, distribution and +modification follow. + + +TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + +0. This License applies to any program or other work which contains a +notice placed by the copyright holder saying it may be distributed under +the terms of this General Public License. The "Program", below, refers +to any such program or work, and a "work based on the Program" means +either the Program or any derivative work under copyright law: that is +to say, a work containing the Program or a portion of it, either +verbatim or with modifications and/or translated into another language. +(Hereinafter, translation is included without limitation in the term +"modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. 
The act of running +the Program is not restricted, and the output from the Program is +covered only if its contents constitute a work based on the Program +(independent of having been made by running the Program). Whether that +is true depends on what the Program does. + +1. You may copy and distribute verbatim copies of the Program's source +code as you receive it, in any medium, provided that you conspicuously +and appropriately publish on each copy an appropriate copyright notice +and disclaimer of warranty; keep intact all the notices that refer to +this License and to the absence of any warranty; and give any other +recipients of the Program a copy of this License along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + +2. You may modify your copy or copies of the Program or any portion of +it, thus forming a work based on the Program, and copy and distribute +such modifications or work under the terms of Section 1 above, provided +that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices +stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in +whole or in part contains or is derived from the Program or any part +thereof, to be licensed as a whole at no charge to all third parties +under the terms of this License. + + c) If the modified program normally reads commands interactively when +run, you must cause it, when started running for such interactive use in +the most ordinary way, to print or display an announcement including an +appropriate copyright notice and a notice that there is no warranty (or +else, saying that you provide a warranty) and that users may +redistribute the program under these conditions, and telling the user +how to view a copy of this License. (Exception: if the Program itself is +interactive but does not normally print such an announcement, your work +based on the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, and +can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based on +the Program, the distribution of the whole must be on the terms of this +License, whose permissions for other licensees extend to the entire +whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of a +storage or distribution medium does not bring the other work under the +scope of this License. + +3. 
You may copy and distribute the Program (or a work based on it, under +Section 2) in object code or executable form under the terms of Sections +1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable +source code, which must be distributed under the terms of Sections 1 and +2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three years, +to give any third party, for a charge no more than your cost of +physically performing source distribution, a complete machine-readable +copy of the corresponding source code, to be distributed under the terms +of Sections 1 and 2 above on a medium customarily used for software +interchange; or, + + c) Accompany it with the information you received as to the offer to +distribute corresponding source code. (This alternative is allowed only +for noncommercial distribution and only if you received the program in +object code or executable form with such an offer, in accord with +Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source code +means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to control +compilation and installation of the executable. However, as a special +exception, the source code distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies the +executable. + +If distribution of executable or object code is made by offering access +to copy from a designated place, then offering equivalent access to copy +the source code from the same place counts as distribution of the source +code, even though third parties are not compelled to copy the source +along with the object code. + +4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt otherwise +to copy, modify, sublicense or distribute the Program is void, and will +automatically terminate your rights under this License. However, parties +who have received copies, or rights, from you under this License will +not have their licenses terminated so long as such parties remain in +full compliance. + +5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the Program), +you indicate your acceptance of this License to do so, and all its terms +and conditions for copying, distributing or modifying the Program or +works based on it. + +6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further restrictions +on the recipients' exercise of the rights granted herein. You are not +responsible for enforcing compliance by third parties to this License. + +7. 
If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot distribute +so as to satisfy simultaneously your obligations under this License and +any other pertinent obligations, then as a consequence you may not +distribute the Program at all. For example, if a patent license would +not permit royalty-free redistribution of the Program by all those who +receive copies directly or indirectly through you, then the only way you +could satisfy both it and this License would be to refrain entirely from +distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is implemented +by public license practices. Many people have made generous +contributions to the wide range of software distributed through that +system in reliance on consistent application of that system; it is up to +the author/donor to decide if he or she is willing to distribute +software through any other system and a licensee cannot impose that +choice. + +This section is intended to make thoroughly clear what is believed to be +a consequence of the rest of this License. + +8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License may +add an explicit geographical distribution limitation excluding those +countries, so that distribution is permitted only in or among countries +not thus excluded. In such case, this License incorporates the +limitation as if written in the body of this License. + +9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Program does not specify a version +number of this License, you may choose any version ever published by the +Free Software Foundation. + +10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the +author to ask for permission. For software which is copyrighted by the +Free Software Foundation, write to the Free Software Foundation; we +sometimes make exceptions for this. Our decision will be guided by the +two goals of preserving the free status of all derivatives of our free +software and of promoting the sharing and reuse of software generally. + +NO WARRANTY + +11. 
BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER +EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE +ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. +SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY +SERVICING, REPAIR OR CORRECTION. + +12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR +DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL +DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM +(INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED +INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF +THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR +OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +END OF TERMS AND CONDITIONS + + +How to Apply These Terms to Your New Programs + +If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these +terms. + +To do so, attach the following notices to the program. It is safest to +attach them to the start of each source file to most effectively convey +the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + One line to give the program's name and a brief idea of what it does. + + Copyright (C) + + This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 2 of the License, or (at your +option) any later version. + + This program is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +Public License for more details. + + You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software Foundation, +Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show +w'. This is free software, and you are welcome to redistribute it under +certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the +appropriate parts of the General Public License. Of course, the commands +you use may be called something other than `show w' and `show c'; they +could even be mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. 
Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the +program `Gnomovision' (which makes passes at compilers) written by James +Hacker. + + signature of Ty Coon, 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications +with the library. If this is what you want to do, use the GNU Library +General Public License instead of this License. + + +"CLASSPATH" EXCEPTION TO THE GPL VERSION 2 + +Certain source files distributed by Sun Microsystems, Inc. are subject +to the following clarification and special exception to the GPL Version +2, but only where Sun has expressly included in the particular source +file's header the words + +"Sun designates this particular file as subject to the "Classpath" +exception as provided by Sun in the License file that accompanied this +code." + +Linking this library statically or dynamically with other modules is +making a combined work based on this library. Thus, the terms and +conditions of the GNU General Public License Version 2 cover the whole +combination. + +As a special exception, the copyright holders of this library give you +permission to link this library with independent modules to produce an +executable, regardless of the license terms of these independent modules, +and to copy and distribute the resulting executable under terms of your +choice, provided that you also meet, for each linked independent module, +the terms and conditions of the license of that module.? An independent +module is a module which is not derived from or based on this library.? +If you modify this library, you may extend this exception to your +version of the library, but you are not obligated to do so.? If you do +not wish to do so, delete this exception statement from your version. 
+ \ No newline at end of file diff --git a/x-pack/snapshot-tool/licenses/jaxb-NOTICE.txt b/x-pack/snapshot-tool/licenses/jaxb-NOTICE.txt new file mode 100644 index 0000000000000..8d1c8b69c3fce --- /dev/null +++ b/x-pack/snapshot-tool/licenses/jaxb-NOTICE.txt @@ -0,0 +1 @@ + diff --git a/x-pack/snapshot-tool/licenses/jaxb-api-2.2.2.jar.sha1 b/x-pack/snapshot-tool/licenses/jaxb-api-2.2.2.jar.sha1 new file mode 100644 index 0000000000000..a37e187238933 --- /dev/null +++ b/x-pack/snapshot-tool/licenses/jaxb-api-2.2.2.jar.sha1 @@ -0,0 +1 @@ +aeb3021ca93dde265796d82015beecdcff95bf09 \ No newline at end of file diff --git a/x-pack/snapshot-tool/licenses/jmespath-java-1.11.562.jar.sha1 b/x-pack/snapshot-tool/licenses/jmespath-java-1.11.562.jar.sha1 new file mode 100644 index 0000000000000..8e2d0e1935a3f --- /dev/null +++ b/x-pack/snapshot-tool/licenses/jmespath-java-1.11.562.jar.sha1 @@ -0,0 +1 @@ +1147ed0ad1f2c5a16b8271e38e3cda5cd488c8ae \ No newline at end of file diff --git a/x-pack/snapshot-tool/licenses/log4j-1.2-api-2.11.1.jar.sha1 b/x-pack/snapshot-tool/licenses/log4j-1.2-api-2.11.1.jar.sha1 new file mode 100644 index 0000000000000..575d75dbda8c5 --- /dev/null +++ b/x-pack/snapshot-tool/licenses/log4j-1.2-api-2.11.1.jar.sha1 @@ -0,0 +1 @@ +3aba3398fe064a3eab4331f88161c7480e848418 \ No newline at end of file diff --git a/x-pack/snapshot-tool/licenses/log4j-LICENSE.txt b/x-pack/snapshot-tool/licenses/log4j-LICENSE.txt new file mode 100644 index 0000000000000..6279e5206de13 --- /dev/null +++ b/x-pack/snapshot-tool/licenses/log4j-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 1999-2005 The Apache Software Foundation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/x-pack/snapshot-tool/licenses/log4j-NOTICE.txt b/x-pack/snapshot-tool/licenses/log4j-NOTICE.txt new file mode 100644 index 0000000000000..0375732360047 --- /dev/null +++ b/x-pack/snapshot-tool/licenses/log4j-NOTICE.txt @@ -0,0 +1,5 @@ +Apache log4j +Copyright 2007 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). 
\ No newline at end of file diff --git a/x-pack/snapshot-tool/src/bin/elasticsearch-snapshot b/x-pack/snapshot-tool/src/bin/elasticsearch-snapshot new file mode 100755 index 0000000000000..3a2c3daf6518c --- /dev/null +++ b/x-pack/snapshot-tool/src/bin/elasticsearch-snapshot @@ -0,0 +1,28 @@ +#!/bin/bash + +SCRIPT="$0" + +while [ -h "$SCRIPT" ] ; do + ls=`ls -ld "$SCRIPT"` + # Drop everything prior to -> + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + SCRIPT="$link" + else + SCRIPT=`dirname "$SCRIPT"`/"$link" + fi +done + +JARS_PATH="$(dirname "$(dirname "$SCRIPT")")/libs" + +unset CLASSPATH + +for J in $(cd "${JARS_PATH}"; ls *.jar); do + CLASSPATH=${CLASSPATH}${CLASSPATH:+:}${JARS_PATH}/${J} +done + +for J in $(cd "${JARS_PATH}/vendor"; ls *.jar); do + CLASSPATH=${CLASSPATH}${CLASSPATH:+:}${JARS_PATH}/vendor/${J} +done + +exec java -cp "${CLASSPATH}" org.elasticsearch.snapshots.SnapshotToolCli "$@" \ No newline at end of file
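The launcher above resolves symlinks to locate the distribution, assembles a classpath from every jar under libs/ and libs/vendor/, and execs into SnapshotToolCli. A hypothetical invocation, assuming the S3 cleanup command defined later in this change is registered under the subcommand name cleanup_s3 (the subcommand name is a guess; the option names come from CleanupS3RepositoryCommand below):

    bin/elasticsearch-snapshot cleanup_s3 --region us-east-1 --bucket my-snapshots \
        --base_path prod/cluster-1 --access_key <key> --secret_key <secret> --parallelism 100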
diff --git a/x-pack/snapshot-tool/src/main/java/org/elasticsearch/snapshots/AbstractRepository.java b/x-pack/snapshot-tool/src/main/java/org/elasticsearch/snapshots/AbstractRepository.java new file mode 100644 index 0000000000000..bdc78622f1181 --- /dev/null +++ b/x-pack/snapshot-tool/src/main/java/org/elasticsearch/snapshots/AbstractRepository.java @@ -0,0 +1,249 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.snapshots; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.support.GroupedActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.repositories.RepositoryData; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Date; +import java.util.List; +import java.util.Objects; +import java.util.Set; +import java.util.TreeSet; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +public abstract class AbstractRepository implements Repository { + private static final long DEFAULT_SAFETY_GAP_MILLIS = 3600 * 1000; + private static final int DEFAULT_PARALLELISM = 100; + + private static final String INCOMPATIBLE_SNAPSHOTS = "incompatible-snapshots"; + private static final String NAME = "name"; + private static final String UUID = "uuid"; + + protected final Terminal terminal; + private final long safetyGapMillis; + private final int parallelism; + + protected AbstractRepository(Terminal terminal, Long safetyGapMillis, Integer parallelism) { + this.terminal = terminal; + this.safetyGapMillis = safetyGapMillis == null ? DEFAULT_SAFETY_GAP_MILLIS : safetyGapMillis; + this.parallelism = parallelism == null ? DEFAULT_PARALLELISM : parallelism; + } + + private void describeCollection(String start, Collection<?> elements) { + terminal.println(Terminal.Verbosity.VERBOSE, + start + " has " + elements.size() + " elements: " + elements); + } + + /** + * Reads the incompatible snapshot ids from the incompatible-snapshots blob's x-content and returns them as a list. + */ + static List<SnapshotId> incompatibleSnapshotsFromXContent(final XContentParser parser) throws IOException { + List<SnapshotId> incompatibleSnapshotIds = new ArrayList<>(); + if (parser.nextToken() == XContentParser.Token.START_OBJECT) { + while (parser.nextToken() == XContentParser.Token.FIELD_NAME) { + String currentFieldName = parser.currentName(); + if (INCOMPATIBLE_SNAPSHOTS.equals(currentFieldName)) { + if (parser.nextToken() == XContentParser.Token.START_ARRAY) { + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + incompatibleSnapshotIds.add(snapshotIdFromXContent(parser)); + } + } else { + throw new ElasticsearchParseException("expected array for [" + currentFieldName + "]"); + } + } else { + throw new ElasticsearchParseException("unknown field name [" + currentFieldName + "]"); + } + } + } else { + throw new ElasticsearchParseException("start object expected"); + } + return incompatibleSnapshotIds; + } + + private static SnapshotId snapshotIdFromXContent(XContentParser parser) throws IOException { + // the new format from 5.0 which contains the snapshot name and uuid + if (parser.currentToken() == XContentParser.Token.START_OBJECT) { + String name = null; + String uuid = null; + while (parser.nextToken() != XContentParser.Token.END_OBJECT) { + String currentFieldName = parser.currentName(); + parser.nextToken(); + if (NAME.equals(currentFieldName)) { + name = parser.text(); + } else if (UUID.equals(currentFieldName)) { + uuid = parser.text(); + } + } + return new SnapshotId(name, uuid); + } else { + // the old format pre 5.0 that only contains the snapshot name, use the name as the uuid too + final String name = parser.text(); + return new SnapshotId(name, name); + } + }
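The two branches of snapshotIdFromXContent correspond to the two historical on-disk shapes of the incompatible-snapshots blob the parser above accepts. Illustrative samples of both (field values are made up):

    {"incompatible-snapshots": [{"name": "snap-1", "uuid": "R4f8Qx-example"}]}   (5.0+ format: name plus uuid)
    {"incompatible-snapshots": ["snap-1"]}                                       (pre-5.0 format: bare name, reused as the uuid)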
Exiting"); + return; + } + long latestIndexId = latestIndexIdAndTimestamp.v1(); + terminal.println(Terminal.Verbosity.VERBOSE, "Latest index file generation is " + latestIndexId); + Date indexNTimestamp = latestIndexIdAndTimestamp.v2(); + Date shiftedIndexNTimestamp = new Date(indexNTimestamp.getTime() - safetyGapMillis); + terminal.println(Terminal.Verbosity.VERBOSE, "Latest index file creation timestamp is " + indexNTimestamp); + terminal.println(Terminal.Verbosity.VERBOSE, "Shifted by safety gap creation timestamp is " + shiftedIndexNTimestamp); + + terminal.println(Terminal.Verbosity.VERBOSE, "Reading latest index file"); + final RepositoryData repositoryData = getRepositoryData(latestIndexId); + final Collection incompatibleSnapshots = getIncompatibleSnapshots(); + if (incompatibleSnapshots.isEmpty() == false) { + throw new ElasticsearchException( + "Found incompatible snapshots which prevent a safe cleanup execution " + incompatibleSnapshots); + } + if (repositoryData.getIndices().isEmpty()) { + throw new ElasticsearchException( + "The repository data contains no references to any indices. Maybe it is from before version 5.x?"); + } + Set referencedIndexIds = repositoryData.getIndices().values().stream().map(IndexId::getId).collect(Collectors.toSet()); + + describeCollection("Set of indices referenced by index file", referencedIndexIds); + + terminal.println(Terminal.Verbosity.VERBOSE, "Listing indices/ directory"); + Set allIndexIds = getAllIndexDirectoryNames(); + describeCollection("Set of indices inside indices/ directory", allIndexIds); + + Set deletionCandidates = new TreeSet<>(Sets.difference(allIndexIds, referencedIndexIds)); + describeCollection("Set of deletion candidates", deletionCandidates); + if (deletionCandidates.isEmpty()) { + terminal.println(Terminal.Verbosity.NORMAL, "Set of deletion candidates is empty. Exiting"); + return; + } + + ExecutorService executor = EsExecutors.newScaling("snapshot_cleanup", 0, parallelism, 10L, TimeUnit.SECONDS, + EsExecutors.daemonThreadFactory("snapshot_cleanup_tool"), new ThreadContext(Settings.EMPTY)); + try { + PlainActionFuture> orphanedIndicesFuture = new PlainActionFuture<>(); + GroupedActionListener groupedOrphanedIndicesListener = new GroupedActionListener<>(orphanedIndicesFuture, + deletionCandidates.size()); + for (String candidate : deletionCandidates) { + executor.submit(new ActionRunnable<>(groupedOrphanedIndicesListener) { + @Override + protected void doRun() { + if (isOrphaned(candidate, shiftedIndexNTimestamp)) { + groupedOrphanedIndicesListener.onResponse(candidate); + } else { + groupedOrphanedIndicesListener.onResponse(null); + } + } + }); + } + Set orphanedIndexIds = + new TreeSet<>(orphanedIndicesFuture.actionGet().stream().filter(Objects::nonNull).collect(Collectors.toSet())); + describeCollection("Set of orphaned indices", orphanedIndexIds); + if (orphanedIndexIds.isEmpty()) { + terminal.println(Terminal.Verbosity.NORMAL, "Set of orphaned indices is empty. Exiting"); + return; + } + + confirm(terminal, orphanedIndexIds.size() + " indices have been found. Do you want to remove orphaned indices files? 
" + + "This action is NOT REVERSIBLE"); + + terminal.println(Terminal.Verbosity.NORMAL, "Removing " + orphanedIndexIds.size() + " orphaned indices"); + PlainActionFuture> removalFuture = new PlainActionFuture<>(); + final List> results = Collections.synchronizedList(new ArrayList<>()); + GroupedActionListener groupedRemovalListener = + new GroupedActionListener<>(removalFuture, orphanedIndexIds.size()); + for (final String indexId : orphanedIndexIds) { + executor.submit(new ActionRunnable<>(groupedRemovalListener) { + @Override + protected void doRun() { + terminal.println(Terminal.Verbosity.NORMAL, "Removing orphaned index " + indexId); + Tuple countSize = deleteIndex(indexId); + terminal.println("Index directory " + indexId + ", files removed " + countSize.v1() + + ", bytes freed " + countSize.v2()); + results.add(countSize); + groupedRemovalListener.onResponse(null); + } + }); + } + Exception ex = null; + try { + removalFuture.actionGet(); + } catch (Exception e) { + ex = e; + } + int totalFilesRemoved = results.stream().mapToInt(Tuple::v1).sum(); + long totalSpaceFreed = results.stream().mapToLong(Tuple::v2).sum(); + terminal.println(Terminal.Verbosity.NORMAL, "Total files removed: " + totalFilesRemoved); + terminal.println(Terminal.Verbosity.NORMAL, "Total bytes freed: " + totalSpaceFreed); + terminal.println(Terminal.Verbosity.NORMAL, + "Finished removing " + results.size() + "/" + orphanedIndexIds.size() + " orphaned indices"); + if (ex != null) { + throw new ElasticsearchException(ex); + } + } finally { + executor.shutdown(); + try { + if (executor.awaitTermination(30, TimeUnit.SECONDS) == false) { + terminal.println(Terminal.Verbosity.NORMAL, "Unexpectedly there are still tasks running on the executor"); + } + } catch (InterruptedException e) { + throw new ElasticsearchException(e); + } + } + } + + private boolean isOrphaned(String candidate, Date shiftedIndexNTimestamp) { + terminal.println(Terminal.Verbosity.VERBOSE, "Reading index " + candidate + " last modification timestamp"); + Date indexTimestamp = getIndexTimestamp(candidate); + if (indexTimestamp != null) { + if (indexTimestamp.before(shiftedIndexNTimestamp)) { + terminal.println(Terminal.Verbosity.VERBOSE, + "Index " + candidate + " is orphaned because its modification timestamp " + indexTimestamp + + " is less than index-N shifted timestamp " + shiftedIndexNTimestamp); + return true; + } else { + terminal.println(Terminal.Verbosity.VERBOSE, + "Index " + candidate + " might not be orphaned because its modification timestamp " + + indexTimestamp + + " is gte than index-N shifted timestamp " + shiftedIndexNTimestamp); + } + } + return false; + } + + private void confirm(Terminal terminal, String msg) { + terminal.println(Terminal.Verbosity.NORMAL, msg); + String text = terminal.readText("Confirm [y/N] "); + if (text.equalsIgnoreCase("y") == false) { + throw new ElasticsearchException("Aborted by user"); + } + } +} diff --git a/x-pack/snapshot-tool/src/main/java/org/elasticsearch/snapshots/CleanupS3RepositoryCommand.java b/x-pack/snapshot-tool/src/main/java/org/elasticsearch/snapshots/CleanupS3RepositoryCommand.java new file mode 100644 index 0000000000000..a6cd78bf32e17 --- /dev/null +++ b/x-pack/snapshot-tool/src/main/java/org/elasticsearch/snapshots/CleanupS3RepositoryCommand.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
+public class CleanupS3RepositoryCommand extends Command {
+
+    private final OptionSpec<String> regionOption;
+    private final OptionSpec<String> endpointOption;
+    private final OptionSpec<String> bucketOption;
+    private final OptionSpec<String> basePathOption;
+    private final OptionSpec<String> accessKeyOption;
+    private final OptionSpec<String> secretKeyOption;
+    private final OptionSpec<Long> safetyGapMillisOption;
+    private final OptionSpec<Integer> parallelismOption;
+
+    public CleanupS3RepositoryCommand() {
+        super("Command to clean up orphaned segment files from the S3 repository",
+                CommandLoggingConfigurator::configureLoggingWithoutConfig);
+
+        regionOption = parser.accepts("region", "S3 region")
+                .withRequiredArg();
+
+        endpointOption = parser.accepts("endpoint", "S3 endpoint")
+                .withRequiredArg();
+
+        bucketOption = parser.accepts("bucket", "Bucket name")
+                .withRequiredArg();
+
+        basePathOption = parser.accepts("base_path", "Base path")
+                .withRequiredArg();
+
+        accessKeyOption = parser.accepts("access_key", "Access key")
+                .withRequiredArg();
+
+        secretKeyOption = parser.accepts("secret_key", "Secret key")
+                .withRequiredArg();
+
+        safetyGapMillisOption = parser.accepts("safety_gap_millis", "Safety gap to account for clock drift")
+                .withRequiredArg().ofType(Long.class);
+
+        parallelismOption = parser.accepts("parallelism", "How many threads to use to talk to S3")
+                .withRequiredArg().ofType(Integer.class);
+    }
+
+    @Override
+    protected void execute(Terminal terminal, OptionSet options) throws Exception {
+        String region = regionOption.value(options);
+        String endpoint = endpointOption.value(options);
+
+        if (Strings.isNullOrEmpty(region) && Strings.isNullOrEmpty(endpoint)) {
+            throw new ElasticsearchException("region or endpoint option is required for cleaning up S3 repository");
+        }
+
+        if (Strings.isNullOrEmpty(region) == false && Strings.isNullOrEmpty(endpoint) == false) {
+            throw new ElasticsearchException("you must not specify both region and endpoint");
+        }
+
+        String bucket = bucketOption.value(options);
+        if (Strings.isNullOrEmpty(bucket)) {
+            throw new ElasticsearchException("bucket option is required for cleaning up S3 repository");
+        }
+
+        String basePath = basePathOption.value(options);
+        if (basePath.endsWith("/")) {
+            throw new ElasticsearchException("there should be no trailing slash in the base path");
+        }
+
+        String accessKey = accessKeyOption.value(options);
+        if (Strings.isNullOrEmpty(accessKey)) {
+            throw new ElasticsearchException("access_key option is required for cleaning up S3 repository");
+        }
+
+        String secretKey = secretKeyOption.value(options);
+        if (Strings.isNullOrEmpty(secretKey)) {
+            throw new ElasticsearchException("secret_key option is required for cleaning up S3 repository");
+        }
+
+        Long safetyGapMillis = safetyGapMillisOption.value(options);
+
+        if (safetyGapMillis != null && safetyGapMillis < 0L) {
+            throw new ElasticsearchException("safety_gap_millis should be non-negative");
+        }
+
+        Integer parallelism = parallelismOption.value(options);
+        if (parallelism != null && parallelism < 1) {
+            throw new ElasticsearchException("parallelism should be at least 1");
+        }
+
+        Repository repository = new S3Repository(terminal, safetyGapMillis, parallelism, endpoint, region, accessKey, secretKey, bucket,
+                basePath);
+        repository.cleanup();
+    }
+
+    // package-private for testing
+    OptionParser getParser() {
+        return parser;
+    }
+}
diff --git a/x-pack/snapshot-tool/src/main/java/org/elasticsearch/snapshots/Repository.java b/x-pack/snapshot-tool/src/main/java/org/elasticsearch/snapshots/Repository.java
new file mode 100644
index 0000000000000..c34483d808491
--- /dev/null
+++ b/x-pack/snapshot-tool/src/main/java/org/elasticsearch/snapshots/Repository.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.snapshots;
+
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.repositories.RepositoryData;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Date;
+import java.util.Set;
+
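+/**
+ * The storage-specific primitives that the cleanup flow in {@code AbstractRepository} is
+ * built on: locating the latest index-N generation and its timestamp, loading repository
+ * metadata, enumerating and timestamping index directories, and deleting a directory while
+ * reporting the number of files and bytes removed.
+ */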
ElasticsearchException("parallelism should be at least 1"); + } + + Repository repository = new S3Repository(terminal, safetyGapMillis, parallelism, endpoint, region, accessKey, secretKey, bucket, + basePath); + repository.cleanup(); + } + + // package-private for testing + OptionParser getParser() { + return parser; + } + + +} diff --git a/x-pack/snapshot-tool/src/main/java/org/elasticsearch/snapshots/Repository.java b/x-pack/snapshot-tool/src/main/java/org/elasticsearch/snapshots/Repository.java new file mode 100644 index 0000000000000..c34483d808491 --- /dev/null +++ b/x-pack/snapshot-tool/src/main/java/org/elasticsearch/snapshots/Repository.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.snapshots; + +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.repositories.RepositoryData; + +import java.io.IOException; +import java.util.Collection; +import java.util.Date; +import java.util.Set; + +public interface Repository { + Tuple getLatestIndexIdAndTimestamp() throws IOException; + + RepositoryData getRepositoryData(long indexFileGeneration) throws IOException; + + Collection getIncompatibleSnapshots() throws IOException; + + Set getAllIndexDirectoryNames(); + + Date getIndexTimestamp(String indexDirectoryName); + + Tuple deleteIndex(String indexDirectoryName); + + void cleanup() throws IOException; +} diff --git a/x-pack/snapshot-tool/src/main/java/org/elasticsearch/snapshots/S3Repository.java b/x-pack/snapshot-tool/src/main/java/org/elasticsearch/snapshots/S3Repository.java new file mode 100644 index 0000000000000..a13bcc029d7fd --- /dev/null +++ b/x-pack/snapshot-tool/src/main/java/org/elasticsearch/snapshots/S3Repository.java @@ -0,0 +1,221 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+package org.elasticsearch.snapshots;
+
+import com.amazonaws.AmazonServiceException;
+import com.amazonaws.ClientConfiguration;
+import com.amazonaws.auth.AWSStaticCredentialsProvider;
+import com.amazonaws.auth.BasicAWSCredentials;
+import com.amazonaws.client.builder.AwsClientBuilder;
+import com.amazonaws.services.s3.AmazonS3;
+import com.amazonaws.services.s3.AmazonS3ClientBuilder;
+import com.amazonaws.services.s3.model.AmazonS3Exception;
+import com.amazonaws.services.s3.model.DeleteObjectsRequest;
+import com.amazonaws.services.s3.model.ListObjectsRequest;
+import com.amazonaws.services.s3.model.ObjectListing;
+import com.amazonaws.services.s3.model.S3ObjectSummary;
+import org.elasticsearch.cli.Terminal;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.core.internal.io.Streams;
+import org.elasticsearch.repositories.RepositoryData;
+import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
+import org.elasticsearch.rest.RestStatus;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Date;
+import java.util.List;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+public class S3Repository extends AbstractRepository {
+    private final AmazonS3 client;
+    private final String bucket;
+    private final String basePath;
+
+    S3Repository(Terminal terminal, Long safetyGapMillis, Integer parallelism, String endpoint, String region, String accessKey,
+                 String secretKey, String bucket, String basePath) {
+        super(terminal, safetyGapMillis, parallelism);
+        this.client = buildS3Client(endpoint, region, accessKey, secretKey);
+        this.basePath = basePath;
+        this.bucket = bucket;
+    }
+
+    private static AmazonS3 buildS3Client(String endpoint, String region, String accessKey, String secretKey) {
+        final AmazonS3ClientBuilder builder = AmazonS3ClientBuilder.standard();
+        builder.withCredentials(new AWSStaticCredentialsProvider(new BasicAWSCredentials(accessKey, secretKey)));
+        if (Strings.isNullOrEmpty(region)) {
+            builder.withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(endpoint, null));
+        } else {
+            builder.withRegion(region);
+        }
+        builder.setClientConfiguration(new ClientConfiguration().withUserAgentPrefix("s3_cleanup_tool"));
+
+        return builder.build();
+    }
+
+    private String fullPath(String path) {
+        return basePath + "/" + path;
+    }
+
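+    // The repository stores its metadata in blobs named index-0, index-1, ... ("index-N");
+    // the highest N is the current generation. The scan below lists every key with the
+    // index- prefix, parses the numeric suffix, and keeps the largest generation together
+    // with that blob's last-modified time.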
+    @Override
+    public Tuple<Long, Date> getLatestIndexIdAndTimestamp() {
+        ObjectListing listing = client.listObjects(bucket, fullPath(BlobStoreRepository.INDEX_FILE_PREFIX));
+        int prefixLength = fullPath(BlobStoreRepository.INDEX_FILE_PREFIX).length();
+        long maxGeneration = -1;
+        Date timestamp = null;
+        while (true) {
+            for (S3ObjectSummary objectSummary : listing.getObjectSummaries()) {
+                String generationStr = objectSummary.getKey().substring(prefixLength);
+                try {
+                    long generation = Long.parseLong(generationStr);
+                    if (generation > maxGeneration) {
+                        maxGeneration = generation;
+                        timestamp = objectSummary.getLastModified();
+                    }
+                } catch (NumberFormatException e) {
+                    terminal.println(Terminal.Verbosity.VERBOSE,
+                            "Ignoring index file with unexpected name format " + objectSummary.getKey());
+                }
+            }
+
+            if (listing.isTruncated()) { // very unlikely that we have 1K+ index-N files, but let's make it bullet-proof
+                listing = client.listNextBatchOfObjects(listing);
+            } else {
+                return Tuple.tuple(maxGeneration, timestamp);
+            }
+        }
+    }
+
+    @Override
+    public RepositoryData getRepositoryData(long indexFileGeneration) throws IOException {
+        final String snapshotsIndexBlobName = BlobStoreRepository.INDEX_FILE_PREFIX + indexFileGeneration;
+        try (InputStream blob = client.getObject(bucket, fullPath(snapshotsIndexBlobName)).getObjectContent()) {
+            BytesStreamOutput out = new BytesStreamOutput();
+            Streams.copy(blob, out);
+            // EMPTY is safe here because RepositoryData#fromXContent calls namedObject
+            try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY,
+                    LoggingDeprecationHandler.INSTANCE, out.bytes(), XContentType.JSON)) {
+                return RepositoryData.snapshotsFromXContent(parser, indexFileGeneration);
+            }
+        } catch (IOException e) {
+            terminal.println("Failed to read " + snapshotsIndexBlobName + " file");
+            throw e;
+        }
+    }
+
+    @Override
+    public Collection<SnapshotId> getIncompatibleSnapshots() throws IOException {
+        try (InputStream blob = client.getObject(bucket, fullPath("incompatible-snapshots")).getObjectContent()) {
+            BytesStreamOutput out = new BytesStreamOutput();
+            Streams.copy(blob, out);
+            // EMPTY is safe here because RepositoryData#fromXContent calls namedObject
+            try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY,
+                    LoggingDeprecationHandler.INSTANCE, out.bytes(), XContentType.JSON)) {
+                return incompatibleSnapshotsFromXContent(parser);
+            }
+        } catch (AmazonS3Exception e) {
+            if (e.getStatusCode() != RestStatus.NOT_FOUND.getStatus()) {
+                throw e;
+            }
+            return Collections.emptyList();
+        } catch (IOException e) {
+            terminal.println("Failed to read [incompatible-snapshots] blob");
+            throw e;
+        }
+    }
+
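+    // Index directories are not first-class objects in S3, so they are discovered by listing
+    // with prefix "indices/" and delimiter "/": S3 then returns each immediate "subdirectory"
+    // as a common prefix instead of returning every object underneath it.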
+    @Override
+    public Set<String> getAllIndexDirectoryNames() {
+        try {
+            List<String> prefixes = new ArrayList<>();
+            ListObjectsRequest request = new ListObjectsRequest();
+            request.setBucketName(bucket);
+            request.setPrefix(fullPath("indices/"));
+            request.setDelimiter("/");
+            ObjectListing objectListing = client.listObjects(request);
+            prefixes.addAll(objectListing.getCommonPrefixes());
+
+            while (objectListing.isTruncated()) {
+                objectListing = client.listNextBatchOfObjects(objectListing);
+                prefixes.addAll(objectListing.getCommonPrefixes());
+            }
+            int indicesPrefixLength = fullPath("indices/").length();
+            assert prefixes.stream().allMatch(prefix -> prefix.startsWith(fullPath("indices/")));
+            return prefixes.stream().map(prefix -> prefix.substring(indicesPrefixLength, prefix.length() - 1)).collect(Collectors.toSet());
+        } catch (AmazonServiceException e) {
+            terminal.println("Failed to list indices");
+            throw e;
+        }
+    }
+
+    @Override
+    public Date getIndexTimestamp(String indexDirectoryName) {
+        /*
+         * There is a shorter way to get the modification timestamp of the index directory:
+         *
+         * S3Object index = client.getObject(bucket, fullPath("indices/" + indexDirectoryName + "/"));
+         * return index.getObjectMetadata().getLastModified();
+         *
+         * It will also work if the directory is empty.
+         * However, on Minio the code above returns some weird dates far in the past.
+         * So we use listing instead.
+         */
+        final ListObjectsRequest listRequest = new ListObjectsRequest();
+        listRequest.setBucketName(bucket);
+        listRequest.setPrefix(fullPath("indices/" + indexDirectoryName + "/"));
+        listRequest.setMaxKeys(1);
+        ObjectListing listing = client.listObjects(listRequest);
+        List<S3ObjectSummary> summaries = listing.getObjectSummaries();
+        if (summaries.isEmpty()) {
+            terminal.println(Terminal.Verbosity.VERBOSE, "Failed to find a single file in index "
+                    + indexDirectoryName + " directory. Skipping");
+            return null;
+        } else {
+            S3ObjectSummary any = summaries.get(0);
+            return any.getLastModified();
+        }
+    }
+
+    private void deleteFiles(List<String> files) {
+        // AWS has a limit of 1K elements when performing a batch remove. However, a list call
+        // never returns more than 1K elements either, so there is no need to partition.
+        terminal.println(Terminal.Verbosity.VERBOSE, "Batch removing the following files " + files);
+        client.deleteObjects(new DeleteObjectsRequest(bucket).withKeys(Strings.toStringArray(files)));
+    }
+
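+    // Deletes every object under indices/<indexDirectoryName>, page by page, and tallies the
+    // number of keys and the total object size so the caller can report files removed and
+    // bytes freed.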
+    @Override
+    public Tuple<Integer, Long> deleteIndex(String indexDirectoryName) {
+        int removedFilesCount = 0;
+        long filesSize = 0L;
+        String prefix = fullPath("indices/" + indexDirectoryName);
+
+        ObjectListing listing = client.listObjects(bucket, prefix);
+        while (true) {
+            List<String> files = listing.getObjectSummaries().stream().map(S3ObjectSummary::getKey).collect(Collectors.toList());
+            deleteFiles(files);
+            removedFilesCount += files.size();
+            filesSize += listing.getObjectSummaries().stream().mapToLong(S3ObjectSummary::getSize).sum();
+
+            if (listing.isTruncated()) {
+                listing = client.listNextBatchOfObjects(listing);
+            } else {
+                return Tuple.tuple(removedFilesCount, filesSize);
+            }
+        }
+    }
+}
diff --git a/x-pack/snapshot-tool/src/main/java/org/elasticsearch/snapshots/SnapshotToolCli.java b/x-pack/snapshot-tool/src/main/java/org/elasticsearch/snapshots/SnapshotToolCli.java
new file mode 100644
index 0000000000000..77cf43dc314b1
--- /dev/null
+++ b/x-pack/snapshot-tool/src/main/java/org/elasticsearch/snapshots/SnapshotToolCli.java
@@ -0,0 +1,24 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.snapshots;
+
+import org.elasticsearch.cli.CommandLoggingConfigurator;
+import org.elasticsearch.cli.LoggingAwareMultiCommand;
+import org.elasticsearch.cli.Terminal;
+
+public class SnapshotToolCli extends LoggingAwareMultiCommand {
+
+    public SnapshotToolCli() {
+        super("Tool to work with repositories and snapshots");
+        CommandLoggingConfigurator.configureLoggingWithoutConfig();
+        subcommands.put("cleanup_s3", new CleanupS3RepositoryCommand());
+    }
+
+    public static void main(String[] args) throws Exception {
+        exit(new SnapshotToolCli().main(args, Terminal.DEFAULT));
+    }
+}
diff --git a/x-pack/snapshot-tool/src/test/java/org/elasticsearch/snapshots/S3CleanupTests.java b/x-pack/snapshot-tool/src/test/java/org/elasticsearch/snapshots/S3CleanupTests.java
new file mode 100644
index 0000000000000..c7660c0478e31
--- /dev/null
+++ b/x-pack/snapshot-tool/src/test/java/org/elasticsearch/snapshots/S3CleanupTests.java
@@ -0,0 +1,333 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.snapshots;
+
+import joptsimple.OptionSet;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.cli.MockTerminal;
+import org.elasticsearch.cli.Terminal;
+import org.elasticsearch.common.settings.MockSecureSettings;
+import org.elasticsearch.common.settings.SecureSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.repositories.RepositoriesService;
+import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
+import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil;
+import org.elasticsearch.repositories.s3.S3RepositoryPlugin;
+import org.elasticsearch.test.ESSingleNodeTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.TreeSet;
+import java.util.concurrent.TimeUnit;
+
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.not;
+
+public class S3CleanupTests extends ESSingleNodeTestCase {
+
+    private BlobStoreRepository repository;
+
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        createRepository("test-repo");
+        repository = (BlobStoreRepository) getInstanceFromNode(RepositoriesService.class).repository("test-repo");
+    }
+
+    @Override
+    protected Settings nodeSettings() {
+        return Settings.builder()
+                .put(super.nodeSettings())
+                .setSecureSettings(credentials())
+                .build();
+    }
+
+    @Override
+    protected Collection<Class<? extends Plugin>> getPlugins() {
+        return pluginList(S3RepositoryPlugin.class);
+    }
+
+    private SecureSettings credentials() {
+        MockSecureSettings secureSettings = new MockSecureSettings();
+        secureSettings.setString("s3.client.default.access_key", getAccessKey());
+        secureSettings.setString("s3.client.default.secret_key", getSecretKey());
+        return secureSettings;
+    }
+
+    private void createRepository(String repoName) {
+        Settings.Builder settings = Settings.builder()
+                .put("bucket", getBucket())
+                .put("base_path", getBasePath())
+                .put("endpoint", getEndpoint());
+
+        AcknowledgedResponse putRepositoryResponse = client().admin().cluster()
+                .preparePutRepository(repoName)
+                .setType("s3")
+                .setSettings(settings).get();
+        assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+    }
+
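+    // The S3 coordinates come from test.s3.* system properties, so this suite only runs
+    // against an S3 endpoint (for example a Minio fixture) that the build wires up;
+    // presumably these properties are injected by the Gradle test configuration.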
+    private String getEndpoint() {
+        return System.getProperty("test.s3.endpoint");
+    }
+
+    private String getRegion() {
+        return "";
+    }
+
+    private String getBucket() {
+        return System.getProperty("test.s3.bucket");
+    }
+
+    private String getBasePath() {
+        return System.getProperty("test.s3.base");
+    }
+
+    private String getAccessKey() {
+        return System.getProperty("test.s3.account");
+    }
+
+    private String getSecretKey() {
+        return System.getProperty("test.s3.key");
+    }
+
+    private MockTerminal executeCommand(boolean abort) throws Exception {
+        return executeCommand(abort, Collections.emptyMap());
+    }
+
+    private MockTerminal executeCommand(boolean abort, Map<String, String> nonDefaultArguments) throws Exception {
+        final CleanupS3RepositoryCommand command = new CleanupS3RepositoryCommand();
+        final OptionSet options = command.getParser().parse(
+                "--safety_gap_millis", nonDefaultArguments.getOrDefault("safety_gap_millis", "0"),
+                "--parallelism", nonDefaultArguments.getOrDefault("parallelism", "10"),
+                "--endpoint", nonDefaultArguments.getOrDefault("endpoint", getEndpoint()),
+                "--region", nonDefaultArguments.getOrDefault("region", getRegion()),
+                "--bucket", nonDefaultArguments.getOrDefault("bucket", getBucket()),
+                "--base_path", nonDefaultArguments.getOrDefault("base_path", getBasePath()),
+                "--access_key", nonDefaultArguments.getOrDefault("access_key", getAccessKey()),
+                "--secret_key", nonDefaultArguments.getOrDefault("secret_key", getSecretKey()));
+        final MockTerminal terminal = new MockTerminal();
+        terminal.setVerbosity(Terminal.Verbosity.VERBOSE);
+        final String input;
+
+        if (abort) {
+            input = randomValueOtherThanMany(c -> c.equalsIgnoreCase("y"), () -> randomAlphaOfLength(1));
+        } else {
+            input = randomBoolean() ? "y" : "Y";
+        }
+
+        terminal.addTextInput(input);
+
+        try {
+            command.execute(terminal, options);
+        } catch (ElasticsearchException e) {
+            if (abort && e.getMessage().contains("Aborted by user")) {
+                return terminal;
+            } else {
+                throw e;
+            }
+        } finally {
+            logger.info("Cleanup command output:\n" + terminal.getOutput());
+        }
+
+        return terminal;
+    }
+
+    private void expectThrows(ThrowingRunnable runnable, String message) {
+        ElasticsearchException ex = expectThrows(ElasticsearchException.class, runnable);
+        assertThat(ex.getMessage(), containsString(message));
+    }
+
+    public void testNoRegionNoEndpoint() {
+        expectThrows(() ->
+                executeCommand(false, Map.of("region", "", "endpoint", "")),
+                "region or endpoint option is required for cleaning up S3 repository");
+    }
+
+    public void testRegionAndEndpointSpecified() {
+        expectThrows(() ->
+                executeCommand(false, Map.of("region", "test_region", "endpoint", "test_endpoint")),
+                "you must not specify both region and endpoint");
+    }
+
+    public void testNoBucket() {
+        expectThrows(() ->
+                executeCommand(false, Map.of("bucket", "")),
+                "bucket option is required for cleaning up S3 repository");
+    }
+
+    public void testNoAccessKey() {
+        expectThrows(() ->
+                executeCommand(false, Map.of("access_key", "")),
+                "access_key option is required for cleaning up S3 repository");
+    }
+
+    public void testNoSecretKey() {
+        expectThrows(() ->
+                executeCommand(false, Map.of("secret_key", "")),
+                "secret_key option is required for cleaning up S3 repository");
+    }
+
+    public void testNegativeSafetyGap() {
+        expectThrows(() ->
+                executeCommand(false, Map.of("safety_gap_millis", "-10")),
+                "safety_gap_millis should be non-negative");
+    }
+
+    public void testInvalidParallelism() {
+        expectThrows(() ->
+                executeCommand(false, Map.of("parallelism", "0")),
+                "parallelism should be at least 1");
+    }
+
+    public void testBasePathTrailingSlash() {
+        expectThrows(() ->
+                executeCommand(false, Map.of("base_path", getBasePath() + "/")),
+                "there should be no trailing slash in the base path");
+    }
+
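+    // End-to-end scenario: snapshot real indices into the repository, plant dangling index
+    // directories next to them, and check that the tool removes only the dangling ones, and
+    // only once a newer index-N generation makes them provably orphaned. safety_gap_millis
+    // is 0 here (see executeCommand) so the timestamp check does not get in the way.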
Exiting")); + + createIndex("test-idx-1"); + createIndex("test-idx-2"); + createIndex("test-idx-3"); + ensureGreen(); + + logger.info("--> indexing some data"); + for (int i = 0; i < 100; i++) { + client().prepareIndex("test-idx-1", "doc", Integer.toString(i)).setSource("foo", "bar" + i).get(); + client().prepareIndex("test-idx-2", "doc", Integer.toString(i)).setSource("foo", "bar" + i).get(); + client().prepareIndex("test-idx-3", "doc", Integer.toString(i)).setSource("foo", "bar" + i).get(); + } + client().admin().indices().prepareRefresh().get(); + + // We run multiple iterations of snapshot -> corrupt -> cleanup -> verify -> delete snapshot + // to make sure cleanup tool works correctly regardless of index.latest value + for (int i = 1; i <= randomIntBetween(1, 3); i++) { + logger.info("Iteration number {}", i); + logger.info("--> create first snapshot"); + CreateSnapshotResponse createSnapshotResponse = client().admin() + .cluster() + .prepareCreateSnapshot("test-repo", "snap1") + .setWaitForCompletion(true) + .setIndices("test-idx-*", "-test-idx-3") + .get(); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); + + assertThat(client().admin() + .cluster() + .prepareGetSnapshots("test-repo") + .setSnapshots("snap1") + .get() + .getSnapshots("test-repo") + .get(0) + .state(), + equalTo(SnapshotState.SUCCESS)); + + + logger.info("--> execute cleanup tool, there is nothing to cleanup"); + terminal = executeCommand(false); + assertThat(terminal.getOutput(), containsString("Set of deletion candidates is empty. Exiting")); + + logger.info("--> check that there is no inconsistencies after running the tool"); + BlobStoreTestUtil.assertConsistency(repository, repository.threadPool().executor(ThreadPool.Names.GENERIC)); + + logger.info("--> create several dangling indices"); + int numOfFiles = 0; + long size = 0L; + Map> indexToFiles = new TreeMap<>(); + for (int j = 0; j < randomIntBetween(1, 5); j++) { + String name = randomValueOtherThanMany(n -> indexToFiles.containsKey(n), () -> randomAlphaOfLength(5)); + Set files = new TreeSet<>(); + indexToFiles.put(name, files); + for (int k = 0; k < randomIntBetween(1, 5); k++) { + String file = randomValueOtherThanMany(f -> files.contains(f), () -> randomAlphaOfLength(6)); + files.add(file); + numOfFiles++; + } + size += BlobStoreTestUtil.createDanglingIndex(repository, name, files); + } + Set danglingIndices = indexToFiles.keySet(); + + logger.info("--> ensure dangling index folders are visible"); + assertBusy(() -> BlobStoreTestUtil.assertCorruptionVisible(repository, indexToFiles), 10L, TimeUnit.MINUTES); + + logger.info("--> execute cleanup tool, corruption is created latter than snapshot, there is nothing to cleanup"); + terminal = executeCommand(false); + assertThat(terminal.getOutput(), containsString("Set of orphaned indices is empty. 
Exiting")); + + logger.info("--> create second snapshot"); + createSnapshotResponse = client().admin() + .cluster() + .prepareCreateSnapshot("test-repo", "snap2") + .setWaitForCompletion(true) + .setIndices("test-idx-*", "-test-idx-3") + .get(); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); + + logger.info("--> execute cleanup tool again and abort"); + terminal = executeCommand(true); + assertThat(terminal.getOutput(), + containsString("Set of deletion candidates has " + danglingIndices.size() + " elements: " + danglingIndices)); + assertThat(terminal.getOutput(), + containsString("Set of orphaned indices has " + danglingIndices.size() + " elements: " + danglingIndices)); + assertThat(terminal.getOutput(), containsString("This action is NOT REVERSIBLE")); + for (String index : indexToFiles.keySet()) { + assertThat(terminal.getOutput(), not(containsString("Removing orphaned index " + index))); + } + + logger.info("--> execute cleanup tool again and confirm, dangling indices should go"); + terminal = executeCommand(false); + assertThat(terminal.getOutput(), + containsString("Set of deletion candidates has " + danglingIndices.size() + " elements: " + danglingIndices)); + assertThat(terminal.getOutput(), + containsString("Set of orphaned indices has " + danglingIndices.size() + " elements: " + danglingIndices)); + assertThat(terminal.getOutput(), containsString("This action is NOT REVERSIBLE")); + for (String index : indexToFiles.keySet()) { + assertThat(terminal.getOutput(), containsString("Removing orphaned index " + index)); + for (String file : indexToFiles.get(index)) { + assertThat(terminal.getOutput(), containsString(index + "/" + file)); + } + } + assertThat(terminal.getOutput(), + containsString("Total files removed: " + numOfFiles)); + assertThat(terminal.getOutput(), + containsString("Total bytes freed: " + size)); + + logger.info("--> verify that there is no inconsistencies"); + assertBusy(() -> BlobStoreTestUtil.assertConsistency(repository, repository.threadPool().executor(ThreadPool.Names.GENERIC)), + 10L, TimeUnit.MINUTES); + + logger.info("--> perform cleanup by removing snapshots"); + assertTrue(client().admin() + .cluster() + .prepareDeleteSnapshot("test-repo", "snap1") + .get() + .isAcknowledged()); + assertTrue(client().admin() + .cluster() + .prepareDeleteSnapshot("test-repo", "snap2") + .get() + .isAcknowledged()); + } + } +}