diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 172de8b008a7c..fcfb14b2b7979 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -159,6 +159,7 @@ Please follow these formatting guidelines: * Java indent is 4 spaces * Line width is 140 characters +* Line width for code snippets that are included in the documentation (the ones surrounded by `// tag` and `// end` comments) is 76 characters * The rest is left to Java coding standards * Disable “auto-format on save” to prevent unnecessary format changes. This makes reviews much harder as it generates unnecessary formatting changes. If your IDE supports formatting only modified chunks that is fine to do. * Wildcard imports (`import foo.bar.baz.*`) are forbidden and will cause the build to fail. This can be done automatically by your IDE: diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 82b1d8525b101..71828468e64aa 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -31,22 +31,12 @@ if (GradleVersion.current() < GradleVersion.version(minimumGradleVersion)) { throw new GradleException("Gradle ${minimumGradleVersion}+ is required to build elasticsearch") } -if (JavaVersion.current() < JavaVersion.VERSION_1_8) { - throw new GradleException('Java 1.8 is required to build elasticsearch gradle tools') -} - if (project == rootProject) { // change the build dir used during build init, so that doing a clean // won't wipe out the buildscript jar buildDir = 'build-bootstrap' } -// Make sure :buildSrc: doesn't generate classes incompatible with RUNTIME_JAVA_HOME -// We can't use BuildPlugin here, so read from file -String minimumRuntimeVersion = file('src/main/resources/minimumRuntimeVersion').text.trim() -targetCompatibility = minimumRuntimeVersion -sourceCompatibility = minimumRuntimeVersion - /***************************************************************************** * Propagating version.properties to the rest of the build * 
*****************************************************************************/ @@ -82,6 +72,45 @@ processResources { from tempPropertiesFile } + +if (JavaVersion.current() < JavaVersion.VERSION_1_10) { + throw new GradleException('At least Java 10 is required to build elasticsearch gradle tools') +} + +/***************************************************************************** + * Java version * + *****************************************************************************/ + +// Gradle 4.10 does not support setting this to 11 yet +targetCompatibility = "10" +sourceCompatibility = "10" + +// We have a few classes that need to be compiled for older java versions because these are used to run checks against +// those +sourceSets { + minimumRuntime { + // We only want Java here, but the Groovy doesn't configure javadoc correctly if we don't define this as groovy + groovy { + srcDirs = ['src/main/minimumRuntime'] + } + } +} +compileMinimumRuntimeGroovy { + // We can't use BuildPlugin here, so read from file + String minimumRuntimeVersion = file('src/main/resources/minimumRuntimeVersion').text.trim() + targetCompatibility = minimumRuntimeVersion + sourceCompatibility = minimumRuntimeVersion +} +dependencies { + compile sourceSets.minimumRuntime.output + minimumRuntimeCompile "junit:junit:${props.getProperty('junit')}" + minimumRuntimeCompile localGroovy() +} +jar { + from sourceSets.minimumRuntime.output +} + + /***************************************************************************** * Dependencies used by the entire build * *****************************************************************************/ @@ -94,10 +123,7 @@ dependencies { compile localGroovy() compile "com.carrotsearch.randomizedtesting:junit4-ant:${props.getProperty('randomizedrunner')}" compile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${props.getProperty('randomizedrunner')}" - - compile("junit:junit:${props.getProperty('junit')}") { - transitive = false - } + compile 
'com.netflix.nebula:gradle-extra-configurations-plugin:3.0.3' compile 'com.netflix.nebula:nebula-publishing-plugin:4.4.4' compile 'com.netflix.nebula:gradle-info-plugin:3.0.3' @@ -156,6 +182,7 @@ if (project != rootProject) { dependenciesInfo.enabled = false forbiddenApisMain.enabled = false forbiddenApisTest.enabled = false + forbiddenApisMinimumRuntime.enabled = false jarHell.enabled = false thirdPartyAudit.enabled = false diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 6a82f90bb0ea8..4d3fe8f19fcce 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -99,12 +99,14 @@ class BuildPlugin implements Plugin { configureSourcesJar(project) configurePomGeneration(project) + applyCommonTestConfig(project) configureTest(project) configurePrecommit(project) configureDependenciesInfo(project) } + /** Performs checks on the build environment and prints information about the build environment. */ static void globalBuildInfo(Project project) { if (project.rootProject.ext.has('buildChecksDone') == false) { @@ -776,9 +778,8 @@ class BuildPlugin implements Plugin { } } - /** Returns a closure of common configuration shared by unit and integration tests. 
*/ - static Closure commonTestConfig(Project project) { - return { + static void applyCommonTestConfig(Project project) { + project.tasks.withType(RandomizedTestingTask) { jvm "${project.runtimeJavaHome}/bin/java" parallelism System.getProperty('tests.jvms', 'auto') ifNoTests System.getProperty('tests.ifNoTests', 'fail') @@ -873,6 +874,8 @@ class BuildPlugin implements Plugin { exclude '**/*$*.class' + dependsOn(project.tasks.testClasses) + project.plugins.withType(ShadowPlugin).whenPluginAdded { // Test against a shadow jar if we made one classpath -= project.tasks.compileJava.outputs.files @@ -884,23 +887,9 @@ class BuildPlugin implements Plugin { /** Configures the test task */ static Task configureTest(Project project) { - RandomizedTestingTask test = project.tasks.getByName('test') - test.configure(commonTestConfig(project)) - test.configure { + project.tasks.getByName('test') { include '**/*Tests.class' } - - // Add a method to create additional unit tests for a project, which will share the same - // randomized testing setup, but by default run no tests. 
- project.extensions.add('additionalTest', { String name, Closure config -> - RandomizedTestingTask additionalTest = project.tasks.create(name, RandomizedTestingTask.class) - additionalTest.classpath = test.classpath - additionalTest.testClassesDirs = test.testClassesDirs - additionalTest.configure(commonTestConfig(project)) - additionalTest.configure(config) - additionalTest.dependsOn(project.tasks.testClasses) - project.check.dependsOn(additionalTest) - }); } private static configurePrecommit(Project project) { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy index b29bb7a8cd3b7..d2eb6cc60a576 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy @@ -68,7 +68,9 @@ class ClusterConfiguration { * In case of more than one node, this defaults to the number of nodes */ @Input - Closure minimumMasterNodes = { getNumNodes() > 1 ? getNumNodes() : -1 } + Closure minimumMasterNodes = { + return getNumNodes() > 1 ? getNumNodes() : -1 + } @Input String jvmArgs = "-Xms" + System.getProperty('tests.heap.size', '512m') + diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index ecf3e3420408d..e08fd3f6b75e9 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -122,8 +122,31 @@ class ClusterFormationTasks { } NodeInfo node = new NodeInfo(config, i, project, prefix, elasticsearchVersion, sharedDir) nodes.add(node) - Object dependsOn = startTasks.empty ? 
startDependencies : startTasks.get(0) - startTasks.add(configureNode(project, prefix, runner, dependsOn, node, config, distro, nodes.get(0))) + Closure writeConfigSetup + Object dependsOn + if (node.nodeVersion.onOrAfter("6.5.0-SNAPSHOT")) { + writeConfigSetup = { Map esConfig -> + // Don't force discovery provider if one is set by the test cluster specs already + if (esConfig.containsKey('discovery.zen.hosts_provider') == false) { + esConfig['discovery.zen.hosts_provider'] = 'file' + } + esConfig['discovery.zen.ping.unicast.hosts'] = [] + esConfig + } + dependsOn = startDependencies + } else { + dependsOn = startTasks.empty ? startDependencies : startTasks.get(0) + writeConfigSetup = { Map esConfig -> + String unicastTransportUri = node.config.unicastTransportUri(nodes.get(0), node, project.ant) + if (unicastTransportUri == null) { + esConfig['discovery.zen.ping.unicast.hosts'] = [] + } else { + esConfig['discovery.zen.ping.unicast.hosts'] = "\"${unicastTransportUri}\"" + } + esConfig + } + } + startTasks.add(configureNode(project, prefix, runner, dependsOn, node, config, distro, writeConfigSetup)) } Task wait = configureWaitTask("${prefix}#wait", project, nodes, startTasks, config.nodeStartupWaitSeconds) @@ -182,7 +205,7 @@ class ClusterFormationTasks { * @return a task which starts the node. 
*/ static Task configureNode(Project project, String prefix, Task runner, Object dependsOn, NodeInfo node, ClusterConfiguration config, - Configuration distribution, NodeInfo seedNode) { + Configuration distribution, Closure writeConfig) { // tasks are chained so their execution order is maintained Task setup = project.tasks.create(name: taskName(prefix, node, 'clean'), type: Delete, dependsOn: dependsOn) { @@ -198,7 +221,7 @@ class ClusterFormationTasks { setup = configureCheckPreviousTask(taskName(prefix, node, 'checkPrevious'), project, setup, node) setup = configureStopTask(taskName(prefix, node, 'stopPrevious'), project, setup, node) setup = configureExtractTask(taskName(prefix, node, 'extract'), project, setup, node, distribution) - setup = configureWriteConfigTask(taskName(prefix, node, 'configure'), project, setup, node, seedNode) + setup = configureWriteConfigTask(taskName(prefix, node, 'configure'), project, setup, node, writeConfig) setup = configureCreateKeystoreTask(taskName(prefix, node, 'createKeystore'), project, setup, node) setup = configureAddKeystoreSettingTasks(prefix, project, setup, node) setup = configureAddKeystoreFileTasks(prefix, project, setup, node) @@ -301,7 +324,7 @@ class ClusterFormationTasks { } /** Adds a task to write elasticsearch.yml for the given node configuration */ - static Task configureWriteConfigTask(String name, Project project, Task setup, NodeInfo node, NodeInfo seedNode) { + static Task configureWriteConfigTask(String name, Project project, Task setup, NodeInfo node, Closure configFilter) { Map esConfig = [ 'cluster.name' : node.clusterName, 'node.name' : "node-" + node.nodeNum, @@ -347,10 +370,7 @@ class ClusterFormationTasks { Task writeConfig = project.tasks.create(name: name, type: DefaultTask, dependsOn: setup) writeConfig.doFirst { - String unicastTransportUri = node.config.unicastTransportUri(seedNode, node, project.ant) - if (unicastTransportUri != null) { - esConfig['discovery.zen.ping.unicast.hosts'] = 
"\"${unicastTransportUri}\"" - } + esConfig = configFilter.call(esConfig) File configFile = new File(node.pathConf, 'elasticsearch.yml') logger.info("Configuring ${configFile}") configFile.setText(esConfig.collect { key, value -> "${key}: ${value}" }.join('\n'), 'UTF-8') @@ -681,6 +701,19 @@ class ClusterFormationTasks { static Task configureWaitTask(String name, Project project, List nodes, List startTasks, int waitSeconds) { Task wait = project.tasks.create(name: name, dependsOn: startTasks) wait.doLast { + + Collection unicastHosts = new HashSet<>() + nodes.forEach { otherNode -> + String unicastHost = otherNode.config.unicastTransportUri(otherNode, null, project.ant) + if (unicastHost != null) { + unicastHosts.addAll(Arrays.asList(unicastHost.split(","))) + } + } + String unicastHostsTxt = String.join("\n", unicastHosts) + nodes.forEach { node -> + node.pathConf.toPath().resolve("unicast_hosts.txt").setText(unicastHostsTxt) + } + ant.waitfor(maxwait: "${waitSeconds}", maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', timeoutproperty: "failed${name}") { or { for (NodeInfo node : nodes) { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy index 2838849981a1b..689cf5bf2ed2c 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy @@ -64,8 +64,6 @@ public class RestIntegTestTask extends DefaultTask { runner.testClassesDirs = project.sourceSets.test.output.classesDirs clusterConfig = project.extensions.create("${name}Cluster", ClusterConfiguration.class, project) - // start with the common test configuration - runner.configure(BuildPlugin.commonTestConfig(project)) // override/add more for rest tests runner.parallelism = '1' runner.include('**/*IT.class') diff --git 
a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy index a5d3b41339db6..9e41466ebdd73 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy @@ -50,6 +50,7 @@ public class StandaloneRestTestPlugin implements Plugin { project.getTasks().create("buildResources", ExportElasticsearchBuildResourcesTask) BuildPlugin.globalBuildInfo(project) BuildPlugin.configureRepositories(project) + BuildPlugin.applyCommonTestConfig(project) // only setup tests to build project.sourceSets.create('test') diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy index e38163d616661..95818240cdaaa 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy @@ -24,7 +24,6 @@ import org.elasticsearch.gradle.BuildPlugin import org.gradle.api.Plugin import org.gradle.api.Project import org.gradle.api.plugins.JavaBasePlugin -import org.gradle.api.tasks.compile.JavaCompile /** * Configures the build to compile against Elasticsearch's test framework and @@ -44,7 +43,6 @@ public class StandaloneTestPlugin implements Plugin { description: 'Runs unit tests that are separate' ] RandomizedTestingTask test = project.tasks.create(testOptions) - test.configure(BuildPlugin.commonTestConfig(project)) BuildPlugin.configureCompile(project) test.classpath = project.sourceSets.test.runtimeClasspath test.testClassesDirs = project.sourceSets.test.output.classesDirs diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java 
b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java index 73aad33b8ea24..fcd83a1f46101 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java @@ -39,8 +39,8 @@ public class TestClustersPlugin implements Plugin { - public static final String LIST_TASK_NAME = "listElasticSearchClusters"; - public static final String EXTENSION_NAME = "elasticSearchClusters"; + private static final String LIST_TASK_NAME = "listTestClusters"; + private static final String NODE_EXTENSION_NAME = "testClusters"; private final Logger logger = Logging.getLogger(TestClustersPlugin.class); @@ -50,7 +50,7 @@ public void apply(Project project) { ElasticsearchNode.class, (name) -> new ElasticsearchNode(name, GradleServicesAdapter.getInstance(project)) ); - project.getExtensions().add(EXTENSION_NAME, container); + project.getExtensions().add(NODE_EXTENSION_NAME, container); Task listTask = project.getTasks().create(LIST_TASK_NAME); listTask.setGroup("ES cluster formation"); diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/JdkJarHellCheck.java b/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/JdkJarHellCheck.java similarity index 100% rename from buildSrc/src/main/java/org/elasticsearch/gradle/JdkJarHellCheck.java rename to buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/JdkJarHellCheck.java diff --git a/buildSrc/src/main/java/org/elasticsearch/test/NamingConventionsCheck.java b/buildSrc/src/main/minimumRuntime/org/elasticsearch/test/NamingConventionsCheck.java similarity index 100% rename from buildSrc/src/main/java/org/elasticsearch/test/NamingConventionsCheck.java rename to buildSrc/src/main/minimumRuntime/org/elasticsearch/test/NamingConventionsCheck.java diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 
212b407f02d0b..df90fe70497f5 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -614,9 +614,6 @@ - - - diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java index 021bd9bb15169..f1461dbbd3d97 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java @@ -31,11 +31,11 @@ public class TestClustersPluginIT extends GradleIntegrationTestCase { public void testListClusters() { BuildResult result = GradleRunner.create() .withProjectDir(getProjectDir("testclusters")) - .withArguments("listElasticSearchClusters", "-s") + .withArguments("listTestClusters", "-s") .withPluginClasspath() .build(); - assertEquals(TaskOutcome.SUCCESS, result.task(":listElasticSearchClusters").getOutcome()); + assertEquals(TaskOutcome.SUCCESS, result.task(":listTestClusters").getOutcome()); assertOutputContains( result.getOutput(), " * myTestCluster:" diff --git a/buildSrc/src/testKit/testclusters/build.gradle b/buildSrc/src/testKit/testclusters/build.gradle index 083ce97b963bb..470111f056ef9 100644 --- a/buildSrc/src/testKit/testclusters/build.gradle +++ b/buildSrc/src/testKit/testclusters/build.gradle @@ -2,40 +2,40 @@ plugins { id 'elasticsearch.testclusters' } -elasticSearchClusters { +testClusters { myTestCluster { distribution = 'ZIP' } } task user1 { - useCluster elasticSearchClusters.myTestCluster + useCluster testClusters.myTestCluster doLast { println "user1 executing" } } task user2 { - useCluster elasticSearchClusters.myTestCluster + useCluster testClusters.myTestCluster doLast { println "user2 executing" } } task upToDate1 { - useCluster elasticSearchClusters.myTestCluster + useCluster testClusters.myTestCluster } task upToDate2 { - 
useCluster elasticSearchClusters.myTestCluster + useCluster testClusters.myTestCluster } task skipped1 { enabled = false - useCluster elasticSearchClusters.myTestCluster + useCluster testClusters.myTestCluster } task skipped2 { enabled = false - useCluster elasticSearchClusters.myTestCluster + useCluster testClusters.myTestCluster } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterRequestConverters.java index d6c41e804df61..4da8d128b98d1 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterRequestConverters.java @@ -31,6 +31,8 @@ final class ClusterRequestConverters { + private ClusterRequestConverters() {} + static Request clusterPutSettings(ClusterUpdateSettingsRequest clusterUpdateSettingsRequest) throws IOException { Request request = new Request(HttpPut.METHOD_NAME, "/_cluster/settings"); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphRequestConverters.java index c1f1e1d115f15..f5387047db158 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphRequestConverters.java @@ -24,7 +24,9 @@ import java.io.IOException; -public class GraphRequestConverters { +final class GraphRequestConverters { + + private GraphRequestConverters() {} static Request explore(GraphExploreRequest exploreRequest) throws IOException { String endpoint = RequestConverters.endpoint(exploreRequest.indices(), exploreRequest.types(), "_xpack/graph/_explore"); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java 
b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java index 740b87107c150..ea81c88f8fef7 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java @@ -53,7 +53,10 @@ import java.io.IOException; import java.util.Locale; -public class IndicesRequestConverters { +final class IndicesRequestConverters { + + private IndicesRequestConverters() {} + static Request deleteIndex(DeleteIndexRequest deleteIndexRequest) { String endpoint = RequestConverters.endpoint(deleteIndexRequest.indices()); Request request = new Request(HttpDelete.METHOD_NAME, endpoint); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestRequestConverters.java index e81d716b60f3f..06b4c0fd62ac0 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestRequestConverters.java @@ -30,7 +30,9 @@ import java.io.IOException; -public class IngestRequestConverters { +final class IngestRequestConverters { + + private IngestRequestConverters() {} static Request getPipeline(GetPipelineRequest getPipelineRequest) { String endpoint = new RequestConverters.EndpointBuilder() diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/LicenseRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/LicenseRequestConverters.java index 0daf1c2d94747..7bda5f552ff52 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/LicenseRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/LicenseRequestConverters.java @@ -29,7 +29,10 @@ import org.elasticsearch.client.license.GetLicenseRequest; import 
org.elasticsearch.client.license.PutLicenseRequest; -public class LicenseRequestConverters { +final class LicenseRequestConverters { + + private LicenseRequestConverters() {} + static Request putLicense(PutLicenseRequest putLicenseRequest) { String endpoint = new RequestConverters.EndpointBuilder().addPathPartAsIs("_xpack", "license").build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationRequestConverters.java index 2f5309350df42..ddd1a2a43456e 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationRequestConverters.java @@ -22,7 +22,9 @@ import org.apache.http.client.methods.HttpGet; import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoRequest; -public class MigrationRequestConverters { +final class MigrationRequestConverters { + + private MigrationRequestConverters() {} static Request getMigrationAssistance(IndexUpgradeInfoRequest indexUpgradeInfoRequest) { RequestConverters.EndpointBuilder endpointBuilder = new RequestConverters.EndpointBuilder() diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index f9094c14a9d84..106caea027e27 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -77,6 +77,7 @@ import org.elasticsearch.script.mustache.SearchTemplateRequest; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.client.core.TermVectorsRequest; import java.io.ByteArrayOutputStream; 
import java.io.IOException; @@ -578,6 +579,19 @@ static Request analyze(AnalyzeRequest request) throws IOException { return req; } + static Request termVectors(TermVectorsRequest tvrequest) throws IOException { + String endpoint = new EndpointBuilder().addPathPart( + tvrequest.getIndex(), tvrequest.getType(), tvrequest.getId()).addPathPartAsIs("_termvectors").build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + Params params = new Params(request); + params.withRouting(tvrequest.getRouting()); + params.withPreference(tvrequest.getPreference()); + params.withFields(tvrequest.getFields()); + params.withRealtime(tvrequest.getRealtime()); + request.setEntity(createEntity(tvrequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + static Request getScript(GetStoredScriptRequest getStoredScriptRequest) { String endpoint = new EndpointBuilder().addPathPartAsIs("_scripts").addPathPart(getStoredScriptRequest.id()).build(); Request request = new Request(HttpGet.METHOD_NAME, endpoint); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index 86782b364a060..342e3efbb6a35 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -56,6 +56,8 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; +import org.elasticsearch.client.core.TermVectorsResponse; +import org.elasticsearch.client.core.TermVectorsRequest; import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.ParseField; @@ -156,16 +158,16 @@ import org.elasticsearch.search.aggregations.metrics.ValueCountAggregationBuilder; import 
org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue; import org.elasticsearch.search.aggregations.pipeline.ParsedSimpleValue; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.InternalBucketMetricValue; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.ParsedBucketMetricValue; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.percentile.ParsedPercentilesBucket; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.percentile.PercentilesBucketPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.ParsedStatsBucket; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.StatsBucketPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.extended.ExtendedStatsBucketPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.extended.ParsedExtendedStatsBucket; -import org.elasticsearch.search.aggregations.pipeline.derivative.DerivativePipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.derivative.ParsedDerivative; +import org.elasticsearch.search.aggregations.pipeline.InternalBucketMetricValue; +import org.elasticsearch.search.aggregations.pipeline.ParsedBucketMetricValue; +import org.elasticsearch.search.aggregations.pipeline.ParsedPercentilesBucket; +import org.elasticsearch.search.aggregations.pipeline.PercentilesBucketPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.ParsedStatsBucket; +import org.elasticsearch.search.aggregations.pipeline.StatsBucketPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.ExtendedStatsBucketPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.ParsedExtendedStatsBucket; +import org.elasticsearch.search.aggregations.pipeline.DerivativePipelineAggregationBuilder; +import 
org.elasticsearch.search.aggregations.pipeline.ParsedDerivative; import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.search.suggest.completion.CompletionSuggestion; import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder; @@ -1029,6 +1031,36 @@ public final void explainAsync(ExplainRequest explainRequest, RequestOptions opt listener, singleton(404)); } + + /** + * Calls the Term Vectors API + * + * See Term Vectors API on + * elastic.co + * + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + */ + public final TermVectorsResponse termvectors(TermVectorsRequest request, RequestOptions options) throws IOException { + return performRequestAndParseEntity(request, RequestConverters::termVectors, options, TermVectorsResponse::fromXContent, + emptySet()); + } + + /** + * Asynchronously calls the Term Vectors API + * + * See Term Vectors API on + * elastic.co + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public final void termvectorsAsync(TermVectorsRequest request, RequestOptions options, ActionListener listener) { + performRequestAsyncAndParseEntity(request, RequestConverters::termVectors, options, TermVectorsResponse::fromXContent, listener, + emptySet()); + } + + /** * Executes a request using the Ranking Evaluation API. 
* See Ranking Evaluation API diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotRequestConverters.java index 7ddd089258539..93fb10bd56136 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotRequestConverters.java @@ -36,7 +36,9 @@ import java.io.IOException; -public class SnapshotRequestConverters { +final class SnapshotRequestConverters { + + private SnapshotRequestConverters() {} static Request getRepositories(GetRepositoriesRequest getRepositoriesRequest) { String[] repositories = getRepositoriesRequest.repositories() == null ? Strings.EMPTY_ARRAY : getRepositoriesRequest.repositories(); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksRequestConverters.java index 93b407a82fe51..45723dcc938c5 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksRequestConverters.java @@ -24,7 +24,9 @@ import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; -public class TasksRequestConverters { +final class TasksRequestConverters { + + private TasksRequestConverters() {} static Request cancelTasks(CancelTasksRequest cancelTasksRequest) { Request request = new Request(HttpPost.METHOD_NAME, "/_tasks/_cancel"); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherClient.java index 5378859a999a0..a2b11772c1277 100644 --- 
a/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherClient.java @@ -19,6 +19,8 @@ package org.elasticsearch.client; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.watcher.DeactivateWatchRequest; +import org.elasticsearch.client.watcher.DeactivateWatchResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.watcher.ActivateWatchRequest; import org.elasticsearch.client.watcher.ActivateWatchResponse; @@ -125,6 +127,35 @@ public void putWatchAsync(PutWatchRequest request, RequestOptions options, PutWatchResponse::fromXContent, listener, emptySet()); } + /** + * Deactivate an existing watch + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public DeactivateWatchResponse deactivateWatch(DeactivateWatchRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, WatcherRequestConverters::deactivateWatch, options, + DeactivateWatchResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously deactivate an existing watch + * See + * the docs for more. + * + * @param request the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void deactivateWatchAsync(DeactivateWatchRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, WatcherRequestConverters::deactivateWatch, options, + DeactivateWatchResponse::fromXContent, listener, emptySet()); + } + /** * Deletes a watch from the cluster * See diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherRequestConverters.java index 68e51a649332d..49764025273b6 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherRequestConverters.java @@ -24,6 +24,7 @@ import org.apache.http.client.methods.HttpPut; import org.apache.http.entity.ByteArrayEntity; import org.apache.http.entity.ContentType; +import org.elasticsearch.client.watcher.DeactivateWatchRequest; import org.elasticsearch.client.watcher.ActivateWatchRequest; import org.elasticsearch.client.watcher.AckWatchRequest; import org.elasticsearch.client.watcher.StartWatchServiceRequest; @@ -32,7 +33,9 @@ import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest; import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest; -public class WatcherRequestConverters { +final class WatcherRequestConverters { + + private WatcherRequestConverters() {} static Request startWatchService(StartWatchServiceRequest startWatchServiceRequest) { String endpoint = new RequestConverters.EndpointBuilder() @@ -73,6 +76,17 @@ static Request putWatch(PutWatchRequest putWatchRequest) { return request; } + static Request deactivateWatch(DeactivateWatchRequest deactivateWatchRequest) { + String endpoint = new 
RequestConverters.EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("watcher") + .addPathPartAsIs("watch") + .addPathPart(deactivateWatchRequest.getWatchId()) + .addPathPartAsIs("_deactivate") + .build(); + return new Request(HttpPut.METHOD_NAME, endpoint); + } + static Request deleteWatch(DeleteWatchRequest deleteWatchRequest) { String endpoint = new RequestConverters.EndpointBuilder() .addPathPartAsIs("_xpack") diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackRequestConverters.java index 1d5b9a418586c..9e0c1527403d5 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackRequestConverters.java @@ -27,7 +27,9 @@ import java.util.Locale; import java.util.stream.Collectors; -public class XPackRequestConverters { +final class XPackRequestConverters { + + private XPackRequestConverters() {} static Request info(XPackInfoRequest infoRequest) { Request request = new Request(HttpGet.METHOD_NAME, "/_xpack"); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/TermVectorsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/TermVectorsRequest.java new file mode 100644 index 0000000000000..5c94dfd0a3375 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/TermVectorsRequest.java @@ -0,0 +1,228 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.core; + +import org.elasticsearch.client.Validatable; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Map; + +public class TermVectorsRequest implements ToXContentObject, Validatable { + + private final String index; + private final String type; + private String id = null; + private String routing = null; + private String preference = null; + private boolean realtime = true; + private String[] fields = null; + private boolean requestPositions = true; + private boolean requestPayloads = true; + private boolean requestOffsets = true; + private boolean requestFieldStatistics = true; + private boolean requestTermStatistics = false; + private Map perFieldAnalyzer = null; + private Map filterSettings = null; + private XContentBuilder docBuilder = null; + + + /** + * Constructs TermVectorRequest for the given document + * @param index - index of the document + * @param type - type of the document + * @param docId - id of the document + */ + public TermVectorsRequest(String index, String type, String docId) { + this(index, type); + this.id = docId; + } + + /** + * Constructs TermVectorRequest for an artificial document + * @param index - index of the document + * @param type - type of the document + */ + public TermVectorsRequest(String index, String type) { + this.index = index; + this.type = type; + } + + /** 
+ * Returns the index of the request + */ + public String getIndex() { + return index; + } + + /** + * Returns the type of the request + */ + public String getType() { + return type; + } + + /** + * Returns the id of the request + * can be NULL if there is no document ID + */ + public String getId() { + return id; + } + + /** + * Sets the fields for which term vectors information should be retrieved + */ + public void setFields(String... fields) { + this.fields = fields; + } + + public String[] getFields() { + return fields; + } + + /** + * Sets whether to request term positions + */ + public void setPositions(boolean requestPositions) { + this.requestPositions = requestPositions; + } + + /** + * Sets whether to request term payloads + */ + public void setPayloads(boolean requestPayloads) { + this.requestPayloads = requestPayloads; + } + + /** + * Sets whether to request term offsets + */ + public void setOffsets(boolean requestOffsets) { + this.requestOffsets = requestOffsets; + } + + /** + * Sets whether to request field statistics + */ + public void setFieldStatistics(boolean requestFieldStatistics) { + this.requestFieldStatistics = requestFieldStatistics; + } + + /** + * Sets whether to request term statistics + */ + public void setTermStatistics(boolean requestTermStatistics) { + this.requestTermStatistics = requestTermStatistics; + } + + /** + * Sets different analyzers than the one at the fields + */ + public void setPerFieldAnalyzer(Map perFieldAnalyzer) { + this.perFieldAnalyzer = perFieldAnalyzer; + } + + /** + * Sets an artificial document on what to request _termvectors + */ + public void setDoc(XContentBuilder docBuilder) { + this.docBuilder = docBuilder; + } + + /** + * Sets conditions for terms filtering + */ + public void setFilterSettings(Map filterSettings) { + this.filterSettings = filterSettings; + } + + /** + * Sets a routing to route a request to a particular shard + */ + public void setRouting(String routing) { + this.routing = routing; + } + 
+ public String getRouting() { + return routing; + } + + /** + * Set a preference of which shard copies to execute the request + */ + public void setPreference(String preference) { + this.preference = preference; + } + + public String getPreference() { + return preference; + } + + /** + * Sets if the request should be realtime or near-realtime + */ + public void setRealtime(boolean realtime) { + this.realtime = realtime; + } + + /** + * Returns if the request is realtime(true) or near-realtime(false) + */ + public boolean getRealtime() { + return realtime; + } + + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + // set values only when different from defaults + if (requestPositions == false) builder.field("positions", false); + if (requestPayloads == false) builder.field("payloads", false); + if (requestOffsets == false) builder.field("offsets", false); + if (requestFieldStatistics == false) builder.field("field_statistics", false); + if (requestTermStatistics) builder.field("term_statistics", true); + if (perFieldAnalyzer != null) builder.field("per_field_analyzer", perFieldAnalyzer); + + if (docBuilder != null) { + BytesReference doc = BytesReference.bytes(docBuilder); + try (InputStream stream = doc.streamInput()) { + builder.rawField("doc", stream, docBuilder.contentType()); + } + } + + if (filterSettings != null) { + builder.startObject("filter"); + String[] filterSettingNames = + {"max_num_terms", "min_term_freq", "max_term_freq", "min_doc_freq", "max_doc_freq", "min_word_length", "max_word_length"}; + for (String settingName : filterSettingNames) { + if (filterSettings.containsKey(settingName)) builder.field(settingName, filterSettings.get(settingName)); + } + builder.endObject(); + } + builder.endObject(); + return builder; + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/TermVectorsResponse.java 
b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/TermVectorsResponse.java new file mode 100644 index 0000000000000..5c57fc11b6fe0 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/TermVectorsResponse.java @@ -0,0 +1,486 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.core; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +import java.util.Collections; +import java.util.List; +import java.util.Comparator; +import java.util.Objects; + +public class TermVectorsResponse { + private final String index; + private final String type; + private final String id; + private final long docVersion; + private final boolean found; + private final long tookInMillis; + private final List termVectorList; + + public TermVectorsResponse( + String index, String type, String id, long version, boolean found, long tookInMillis, List termVectorList) { + this.index = index; + this.type = type; + this.id = id; + this.docVersion = version; + this.found = found; + this.tookInMillis = tookInMillis; + this.termVectorList = termVectorList; + } + + private static ConstructingObjectParser PARSER = new ConstructingObjectParser<>("term_vectors", true, + args -> { + // as the response comes from server, we are sure that args[6] will be a list of TermVector + @SuppressWarnings("unchecked") List termVectorList = (List) args[6]; + if (termVectorList != null) { + Collections.sort(termVectorList, Comparator.comparing(TermVector::getFieldName)); + } + return new TermVectorsResponse( + (String) args[0], + (String) args[1], + (String) args[2], + (long) args[3], + (boolean) args[4], + (long) args[5], + termVectorList + ); + } + ); + + static { + PARSER.declareString(constructorArg(), new ParseField("_index")); + PARSER.declareString(constructorArg(), new ParseField("_type")); + PARSER.declareString(optionalConstructorArg(), new ParseField("_id")); + 
PARSER.declareLong(constructorArg(), new ParseField("_version")); + PARSER.declareBoolean(constructorArg(), new ParseField("found")); + PARSER.declareLong(constructorArg(), new ParseField("took")); + PARSER.declareNamedObjects(optionalConstructorArg(), + (p, c, fieldName) -> TermVector.fromXContent(p, fieldName), new ParseField("term_vectors")); + } + + public static TermVectorsResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + /** + * Returns the index for the response + */ + public String getIndex() { + return index; + } + + /** + * Returns the type for the response + */ + public String getType() { + return type; + } + + /** + * Returns the id of the request + * can be NULL if there is no document ID + */ + public String getId() { + return id; + } + + /** + * Returns if the document is found + * always true for artificial documents + */ + public boolean getFound() { + return found; + } + + /** + * Returns the document version + */ + public long getDocVersion() { + return docVersion; + } + + /** + * Returns the time that a request took in milliseconds + */ + public long getTookInMillis() { + return tookInMillis; + } + + /** + * Returns the list of term vectors + */ + public List getTermVectorsList(){ + return termVectorList; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!(obj instanceof TermVectorsResponse)) return false; + TermVectorsResponse other = (TermVectorsResponse) obj; + return index.equals(other.index) + && type.equals(other.type) + && Objects.equals(id, other.id) + && docVersion == other.docVersion + && found == other.found + && tookInMillis == other.tookInMillis + && Objects.equals(termVectorList, other.termVectorList); + } + + @Override + public int hashCode() { + return Objects.hash(index, type, id, docVersion, found, tookInMillis, termVectorList); + } + + + public static final class TermVector { + + private static ConstructingObjectParser PARSER = new 
ConstructingObjectParser<>("term_vector", true, + (args, ctxFieldName) -> { + // as the response comes from server, we are sure that args[1] will be a list of Term + @SuppressWarnings("unchecked") List terms = (List) args[1]; + if (terms != null) { + Collections.sort(terms, Comparator.comparing(Term::getTerm)); + } + return new TermVector(ctxFieldName, (FieldStatistics) args[0], terms); + } + ); + + static { + PARSER.declareObject(optionalConstructorArg(), + (p,c) -> FieldStatistics.fromXContent(p), new ParseField("field_statistics")); + PARSER.declareNamedObjects(optionalConstructorArg(), (p, c, term) -> Term.fromXContent(p, term), new ParseField("terms")); + } + + private final String fieldName; + @Nullable + private final FieldStatistics fieldStatistics; + @Nullable + private final List terms; + + public TermVector(String fieldName, FieldStatistics fieldStatistics, List terms) { + this.fieldName = fieldName; + this.fieldStatistics = fieldStatistics; + this.terms = terms; + } + + public static TermVector fromXContent(XContentParser parser, String fieldName) { + return PARSER.apply(parser, fieldName); + } + + /** + * Returns the field name of the current term vector + */ + public String getFieldName() { + return fieldName; + } + + /** + * Returns the list of terms for the current term vector + */ + public List getTerms() { + return terms; + } + + /** + * Returns the field statistics for the current field + */ + public FieldStatistics getFieldStatistics() { + return fieldStatistics; + } + + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!(obj instanceof TermVector)) return false; + TermVector other = (TermVector) obj; + return fieldName.equals(other.fieldName) + && Objects.equals(fieldStatistics, other.fieldStatistics) + && Objects.equals(terms, other.terms); + } + + @Override + public int hashCode() { + return Objects.hash(fieldName, fieldStatistics, terms); + } + + // Class containing a general field statistics for the 
field + public static final class FieldStatistics { + + private static ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "field_statistics", true, + args -> { + return new FieldStatistics((long) args[0], (int) args[1], (long) args[2]); + } + ); + + static { + PARSER.declareLong(constructorArg(), new ParseField("sum_doc_freq")); + PARSER.declareInt(constructorArg(), new ParseField("doc_count")); + PARSER.declareLong(constructorArg(), new ParseField("sum_ttf")); + } + private final long sumDocFreq; + private final int docCount; + private final long sumTotalTermFreq; + + public FieldStatistics(long sumDocFreq, int docCount, long sumTotalTermFreq) { + this.sumDocFreq = sumDocFreq; + this.docCount = docCount; + this.sumTotalTermFreq = sumTotalTermFreq; + } + + public static FieldStatistics fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + /* + * Returns how many documents this field contains + */ + public int getDocCount() { + return docCount; + } + + /** + * Returns the sum of document frequencies for all terms in this field + */ + public long getSumDocFreq() { + return sumDocFreq; + } + + /** + * Returns the sum of total term frequencies of all terms in this field + */ + public long getSumTotalTermFreq() { + return sumTotalTermFreq; + } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!(obj instanceof FieldStatistics)) return false; + FieldStatistics other = (FieldStatistics) obj; + return docCount == other.docCount + && sumDocFreq == other.sumDocFreq + && sumTotalTermFreq == other.sumTotalTermFreq; + } + + @Override + public int hashCode() { + return Objects.hash(docCount, sumDocFreq, sumTotalTermFreq); + } + } + + + public static final class Term { + private static ConstructingObjectParser PARSER = new ConstructingObjectParser<>("token", true, + (args, ctxTerm) -> { + // as the response comes from server, we are sure that args[4] will be a list of Token + 
@SuppressWarnings("unchecked") List tokens = (List) args[4]; + if (tokens != null) { + Collections.sort( + tokens, + Comparator.comparing(Token::getPosition, Comparator.nullsFirst(Integer::compareTo)) + .thenComparing(Token::getStartOffset, Comparator.nullsFirst(Integer::compareTo)) + .thenComparing(Token::getEndOffset, Comparator.nullsFirst(Integer::compareTo)) + ); + } + return new Term(ctxTerm, (int) args[0], (Integer) args[1], (Long) args[2], (Float) args[3], tokens); + } + ); + static { + PARSER.declareInt(constructorArg(), new ParseField("term_freq")); + PARSER.declareInt(optionalConstructorArg(), new ParseField("doc_freq")); + PARSER.declareLong(optionalConstructorArg(), new ParseField("ttf")); + PARSER.declareFloat(optionalConstructorArg(), new ParseField("score")); + PARSER.declareObjectArray(optionalConstructorArg(), (p,c) -> Token.fromXContent(p), new ParseField("tokens")); + } + + private final String term; + private final int termFreq; + @Nullable + private final Integer docFreq; + @Nullable + private final Long totalTermFreq; + @Nullable + private final Float score; + @Nullable + private final List tokens; + + public Term(String term, int termFreq, Integer docFreq, Long totalTermFreq, Float score, List tokens) { + this.term = term; + this.termFreq = termFreq; + this.docFreq = docFreq; + this.totalTermFreq = totalTermFreq; + this.score = score; + this.tokens = tokens; + } + + public static Term fromXContent(XContentParser parser, String term) { + return PARSER.apply(parser, term); + } + + /** + * Returns the string representation of the term + */ + public String getTerm() { + return term; + } + + /** + * Returns term frequency - the number of times this term occurs in the current document + */ + public int getTermFreq() { + return termFreq; + } + + /** + * Returns document frequency - the number of documents in the index that contain this term + */ + public Integer getDocFreq() { + return docFreq; + } + + /** + * Returns total term frequency - the 
number of times this term occurs across all documents + */ + public Long getTotalTermFreq( ){ + return totalTermFreq; + } + + /** + * Returns tf-idf score, if the request used some form of terms filtering + */ + public Float getScore(){ + return score; + } + + /** + * Returns a list of tokens for the term + */ + public List getTokens() { + return tokens; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!(obj instanceof Term)) return false; + Term other = (Term) obj; + return term.equals(other.term) + && termFreq == other.termFreq + && Objects.equals(docFreq, other.docFreq) + && Objects.equals(totalTermFreq, other.totalTermFreq) + && Objects.equals(score, other.score) + && Objects.equals(tokens, other.tokens); + } + + @Override + public int hashCode() { + return Objects.hash(term, termFreq, docFreq, totalTermFreq, score, tokens); + } + } + + + public static final class Token { + + private static ConstructingObjectParser PARSER = new ConstructingObjectParser<>("token", true, + args -> { + return new Token((Integer) args[0], (Integer) args[1], (Integer) args[2], (String) args[3]); + }); + static { + PARSER.declareInt(optionalConstructorArg(), new ParseField("start_offset")); + PARSER.declareInt(optionalConstructorArg(), new ParseField("end_offset")); + PARSER.declareInt(optionalConstructorArg(), new ParseField("position")); + PARSER.declareString(optionalConstructorArg(), new ParseField("payload")); + } + + @Nullable + private final Integer startOffset; + @Nullable + private final Integer endOffset; + @Nullable + private final Integer position; + @Nullable + private final String payload; + + + public Token(Integer startOffset, Integer endOffset, Integer position, String payload) { + this.startOffset = startOffset; + this.endOffset = endOffset; + this.position = position; + this.payload = payload; + } + + public static Token fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + /** + * Returns 
the start offset of the token in the document's field + */ + public Integer getStartOffset() { + return startOffset; + } + + /** + * Returns the end offset of the token in the document's field + */ + public Integer getEndOffset() { + return endOffset; + } + + /** + * Returns the position of the token in the document's field + */ + public Integer getPosition() { + return position; + } + + /** + * Returns the payload of the token or null if the payload doesn't exist + */ + public String getPayload() { + return payload; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!(obj instanceof Token)) return false; + Token other = (Token) obj; + return Objects.equals(startOffset, other.startOffset) + && Objects.equals(endOffset,other.endOffset) + && Objects.equals(position, other.position) + && Objects.equals(payload, other.payload); + } + + @Override + public int hashCode() { + return Objects.hash(startOffset, endOffset, position, payload); + } + } + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/job/config/RollupJobConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/job/config/RollupJobConfig.java index 9bf639f548d3b..d8e87eeb3d5a8 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/job/config/RollupJobConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/job/config/RollupJobConfig.java @@ -42,11 +42,6 @@ /** * This class holds the configuration details of a rollup job, such as the groupings, metrics, what * index to rollup and where to roll them to. 
- * - * When the configuration is stored server side, if there is no {@link MetricConfig} for the fields referenced in the - * {@link HistogramGroupConfig} and {@link DateHistogramGroupConfig} in the passed {@link GroupConfig}, - * then default metrics of {@code ["min", "max"]} are provided - * */ public class RollupJobConfig implements Validatable, ToXContentObject { diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/PutRoleMappingResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/PutRoleMappingResponse.java index 04cdb14163e3e..00039f1486e1f 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/PutRoleMappingResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/PutRoleMappingResponse.java @@ -64,11 +64,10 @@ public int hashCode() { private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "put_role_mapping_response", true, args -> new PutRoleMappingResponse((boolean) args[0])); static { - PARSER.declareBoolean(constructorArg(), new ParseField("created")); - // To parse the "created" field we declare "role_mapping" field object. - // Once the nested field "created" is found parser constructs the target object and - // ignores the role_mapping object. 
- PARSER.declareObject((a,b) -> {}, (parser, context) -> null, new ParseField("role_mapping")); + ConstructingObjectParser roleMappingParser = new ConstructingObjectParser<>( + "put_role_mapping_response.role_mapping", true, args -> (Boolean) args[0]); + roleMappingParser.declareBoolean(constructorArg(), new ParseField("created")); + PARSER.declareObject(constructorArg(), roleMappingParser::parse, new ParseField("role_mapping")); } public static PutRoleMappingResponse fromXContent(XContentParser parser) throws IOException { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/GenericElasticsearchScript.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/DeactivateWatchRequest.java similarity index 55% rename from modules/lang-painless/src/main/java/org/elasticsearch/painless/GenericElasticsearchScript.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/DeactivateWatchRequest.java index ef2c9513b8e56..b20a56c361f8f 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/GenericElasticsearchScript.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/DeactivateWatchRequest.java @@ -16,24 +16,28 @@ * specific language governing permissions and limitations * under the License. */ +package org.elasticsearch.client.watcher; -package org.elasticsearch.painless; +import org.elasticsearch.client.Validatable; +import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest; -import org.elasticsearch.index.fielddata.ScriptDocValues; +import java.util.Objects; -import java.util.Map; +public class DeactivateWatchRequest implements Validatable { + private final String watchId; -/** - * Generic script interface that Painless implements for all Elasticsearch scripts. 
- */ -public abstract class GenericElasticsearchScript { + public DeactivateWatchRequest(String watchId) { - public GenericElasticsearchScript() {} + Objects.requireNonNull(watchId, "watch id is missing"); + if (PutWatchRequest.isValidId(watchId) == false) { + throw new IllegalArgumentException("watch id contains whitespace"); + } - public static final String[] PARAMETERS = new String[] {"params", "_score", "doc", "_value", "ctx"}; - public abstract Object execute( - Map params, double _score, Map> doc, Object _value, Map ctx); + this.watchId = watchId; + } - public abstract boolean needs_score(); - public abstract boolean needsCtx(); + public String getWatchId() { + return watchId; + } } + diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/DeactivateWatchResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/DeactivateWatchResponse.java new file mode 100644 index 0000000000000..08edd211d5b8f --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/DeactivateWatchResponse.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.watcher; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +public class DeactivateWatchResponse { + private WatchStatus status; + + private static final ParseField STATUS_FIELD = new ParseField("status"); + private static final ConstructingObjectParser PARSER + = new ConstructingObjectParser<>("x_pack_deactivate_watch_response", true, + (fields) -> new DeactivateWatchResponse((WatchStatus) fields[0])); + static { + PARSER.declareObject(ConstructingObjectParser.constructorArg(), + (parser, context) -> WatchStatus.parse(parser), + STATUS_FIELD); + } + + public static DeactivateWatchResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + public DeactivateWatchResponse(WatchStatus status) { + this.status = status; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + DeactivateWatchResponse that = (DeactivateWatchResponse) o; + return Objects.equals(status, that.status); + } + + @Override + public int hashCode() { + return Objects.hash(status); + } + + public WatchStatus getStatus() { + return status; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java index 3f90552fe9b54..e679a85f67f0c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java @@ -44,12 +44,15 @@ import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; +import 
org.elasticsearch.client.core.TermVectorsRequest; +import org.elasticsearch.client.core.TermVectorsResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.get.GetResult; @@ -73,6 +76,7 @@ import java.io.IOException; import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -80,6 +84,7 @@ import static java.util.Collections.singletonMap; import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThan; @@ -196,7 +201,7 @@ public void testGet() throws IOException { ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); - assertEquals("Elasticsearch exception [type=index_not_found_exception, reason=no such index]", exception.getMessage()); + assertEquals("Elasticsearch exception [type=index_not_found_exception, reason=no such index [index]]", exception.getMessage()); assertEquals("index", exception.getMetadata("es.index").get(0)); } IndexRequest index = new IndexRequest("index", "type", "id"); @@ -283,7 +288,7 @@ public void testMultiGet() throws IOException { assertEquals("id1", response.getResponses()[0].getFailure().getId()); assertEquals("type", response.getResponses()[0].getFailure().getType()); 
assertEquals("index", response.getResponses()[0].getFailure().getIndex()); - assertEquals("Elasticsearch exception [type=index_not_found_exception, reason=no such index]", + assertEquals("Elasticsearch exception [type=index_not_found_exception, reason=no such index [index]]", response.getResponses()[0].getFailure().getFailure().getMessage()); assertTrue(response.getResponses()[1].isFailed()); @@ -291,7 +296,7 @@ public void testMultiGet() throws IOException { assertEquals("id2", response.getResponses()[1].getId()); assertEquals("type", response.getResponses()[1].getType()); assertEquals("index", response.getResponses()[1].getIndex()); - assertEquals("Elasticsearch exception [type=index_not_found_exception, reason=no such index]", + assertEquals("Elasticsearch exception [type=index_not_found_exception, reason=no such index [index]]", response.getResponses()[1].getFailure().getFailure().getMessage()); } BulkRequest bulk = new BulkRequest(); @@ -1154,4 +1159,80 @@ public void testParamsEncode() throws IOException { assertEquals(routing, getResponse.getField("_routing").getValue()); } } + + // Not entirely sure if _termvectors belongs to CRUD, and in the absence of a better place, will have it here + public void testTermvectors() throws IOException { + final String sourceIndex = "index1"; + { + // prepare : index docs + Settings settings = Settings.builder() + .put("number_of_shards", 1) + .put("number_of_replicas", 0) + .build(); + String mappings = "\"_doc\":{\"properties\":{\"field\":{\"type\":\"text\"}}}"; + createIndex(sourceIndex, settings, mappings); + assertEquals( + RestStatus.OK, + highLevelClient().bulk( + new BulkRequest() + .add(new IndexRequest(sourceIndex, "_doc", "1") + .source(Collections.singletonMap("field", "value1"), XContentType.JSON)) + .add(new IndexRequest(sourceIndex, "_doc", "2") + .source(Collections.singletonMap("field", "value2"), XContentType.JSON)) + .setRefreshPolicy(RefreshPolicy.IMMEDIATE), + RequestOptions.DEFAULT + ).status() + ); + 
} + { + // test _termvectors on real documents + TermVectorsRequest tvRequest = new TermVectorsRequest(sourceIndex, "_doc", "1"); + tvRequest.setFields("field"); + TermVectorsResponse tvResponse = execute(tvRequest, highLevelClient()::termvectors, highLevelClient()::termvectorsAsync); + + TermVectorsResponse.TermVector.Token expectedToken = new TermVectorsResponse.TermVector.Token(0, 6, 0, null); + TermVectorsResponse.TermVector.Term expectedTerm = new TermVectorsResponse.TermVector.Term( + "value1", 1, null, null, null, Collections.singletonList(expectedToken)); + TermVectorsResponse.TermVector.FieldStatistics expectedFieldStats = + new TermVectorsResponse.TermVector.FieldStatistics(2, 2, 2); + TermVectorsResponse.TermVector expectedTV = + new TermVectorsResponse.TermVector("field", expectedFieldStats, Collections.singletonList(expectedTerm)); + List expectedTVlist = Collections.singletonList(expectedTV); + + assertThat(tvResponse.getIndex(), equalTo(sourceIndex)); + assertThat(Integer.valueOf(tvResponse.getId()), equalTo(1)); + assertTrue(tvResponse.getFound()); + assertEquals(expectedTVlist, tvResponse.getTermVectorsList()); + } + { + // test _termvectors on artificial documents + TermVectorsRequest tvRequest = new TermVectorsRequest(sourceIndex, "_doc"); + XContentBuilder docBuilder = XContentFactory.jsonBuilder(); + docBuilder.startObject().field("field", "valuex").endObject(); + tvRequest.setDoc(docBuilder); + TermVectorsResponse tvResponse = execute(tvRequest, highLevelClient()::termvectors, highLevelClient()::termvectorsAsync); + + TermVectorsResponse.TermVector.Token expectedToken = new TermVectorsResponse.TermVector.Token(0, 6, 0, null); + TermVectorsResponse.TermVector.Term expectedTerm = new TermVectorsResponse.TermVector.Term( + "valuex", 1, null, null, null, Collections.singletonList(expectedToken)); + TermVectorsResponse.TermVector.FieldStatistics expectedFieldStats = + new TermVectorsResponse.TermVector.FieldStatistics(2, 2, 2); + 
TermVectorsResponse.TermVector expectedTV = + new TermVectorsResponse.TermVector("field", expectedFieldStats, Collections.singletonList(expectedTerm)); + List expectedTVlist = Collections.singletonList(expectedTV); + + assertThat(tvResponse.getIndex(), equalTo(sourceIndex)); + assertTrue(tvResponse.getFound()); + assertEquals(expectedTVlist, tvResponse.getTermVectorsList()); + } + } + + // Not entirely sure if _termvectors belongs to CRUD, and in the absence of a better place, will have it here + public void testTermvectorsWithNonExistentIndex() { + TermVectorsRequest request = new TermVectorsRequest("non-existent", "non-existent", "non-existent"); + + ElasticsearchException exception = expectThrows(ElasticsearchException.class, + () -> execute(request, highLevelClient()::termvectors, highLevelClient()::termvectorsAsync)); + assertEquals(RestStatus.NOT_FOUND, exception.status()); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/GetAliasesResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/GetAliasesResponseTests.java index c5bc74e7517c3..92210521b3d0c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/GetAliasesResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/GetAliasesResponseTests.java @@ -109,7 +109,7 @@ public void testFromXContentWithElasticsearchException() throws IOException { " \"root_cause\": [" + " {" + " \"type\": \"index_not_found_exception\"," + - " \"reason\": \"no such index\"," + + " \"reason\": \"no such index [index]\"," + " \"resource.type\": \"index_or_alias\"," + " \"resource.id\": \"index\"," + " \"index_uuid\": \"_na_\"," + @@ -117,7 +117,7 @@ public void testFromXContentWithElasticsearchException() throws IOException { " }" + " ]," + " \"type\": \"index_not_found_exception\"," + - " \"reason\": \"no such index\"," + + " \"reason\": \"no such index [index]\"," + " \"resource.type\": \"index_or_alias\"," + " 
\"resource.id\": \"index\"," + " \"index_uuid\": \"_na_\"," + @@ -131,7 +131,7 @@ public void testFromXContentWithElasticsearchException() throws IOException { assertThat(getAliasesResponse.getError(), nullValue()); assertThat(getAliasesResponse.status(), equalTo(RestStatus.NOT_FOUND)); assertThat(getAliasesResponse.getException().getMessage(), - equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index]")); + equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index [index]]")); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index 533f6bcb22e25..832aba51e2b41 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -573,7 +573,8 @@ public void testAliasesNonExistentIndex() throws IOException { ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(nonExistentIndexRequest, highLevelClient().indices()::updateAliases, highLevelClient().indices()::updateAliasesAsync)); assertThat(exception.status(), equalTo(RestStatus.NOT_FOUND)); - assertThat(exception.getMessage(), equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index]")); + assertThat(exception.getMessage(), + equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index [non_existent_index]]")); assertThat(exception.getMetadata("es.index"), hasItem(nonExistentIndex)); createIndex(index, Settings.EMPTY); @@ -583,7 +584,8 @@ public void testAliasesNonExistentIndex() throws IOException { exception = expectThrows(ElasticsearchStatusException.class, () -> execute(mixedRequest, highLevelClient().indices()::updateAliases, highLevelClient().indices()::updateAliasesAsync)); assertThat(exception.status(), 
equalTo(RestStatus.NOT_FOUND)); - assertThat(exception.getMessage(), equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index]")); + assertThat(exception.getMessage(), + equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index [non_existent_index]]")); assertThat(exception.getMetadata("es.index"), hasItem(nonExistentIndex)); assertThat(exception.getMetadata("es.index"), not(hasItem(index))); assertThat(aliasExists(index, alias), equalTo(false)); @@ -595,7 +597,8 @@ public void testAliasesNonExistentIndex() throws IOException { exception = expectThrows(ElasticsearchException.class, () -> execute(removeIndexRequest, highLevelClient().indices()::updateAliases, highLevelClient().indices()::updateAliasesAsync)); assertThat(exception.status(), equalTo(RestStatus.NOT_FOUND)); - assertThat(exception.getMessage(), equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index]")); + assertThat(exception.getMessage(), + equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index [non_existent_index]]")); assertThat(exception.getMetadata("es.index"), hasItem(nonExistentIndex)); assertThat(exception.getMetadata("es.index"), not(hasItem(index))); assertThat(aliasExists(index, alias), equalTo(false)); @@ -1060,7 +1063,7 @@ public void testGetAliasesNonExistentIndexOrAlias() throws IOException { highLevelClient().indices()::getAliasAsync); assertThat(getAliasesResponse.status(), equalTo(RestStatus.NOT_FOUND)); assertThat(getAliasesResponse.getException().getMessage(), - equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index]")); + equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index [index]]")); } { GetAliasesRequest getAliasesRequest = new GetAliasesRequest(alias); @@ -1077,7 +1080,7 @@ public void testGetAliasesNonExistentIndexOrAlias() throws IOException { highLevelClient().indices()::getAliasAsync); 
assertThat(getAliasesResponse.status(), equalTo(RestStatus.NOT_FOUND)); assertThat(getAliasesResponse.getException().getMessage(), - equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index]")); + equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index [non_existent_index]]")); } { GetAliasesRequest getAliasesRequest = new GetAliasesRequest().indices(index, "non_existent_index").aliases(alias); @@ -1085,7 +1088,7 @@ public void testGetAliasesNonExistentIndexOrAlias() throws IOException { highLevelClient().indices()::getAliasAsync); assertThat(getAliasesResponse.status(), equalTo(RestStatus.NOT_FOUND)); assertThat(getAliasesResponse.getException().getMessage(), - equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index]")); + equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index [non_existent_index]]")); } { GetAliasesRequest getAliasesRequest = new GetAliasesRequest().indices("non_existent_index*"); @@ -1199,7 +1202,8 @@ public void testIndexPutSettingNonExistent() throws IOException { ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(indexUpdateSettingsRequest, highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); - assertThat(exception.getMessage(), equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index]")); + assertThat(exception.getMessage(), + equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index [index]]")); createIndex(index, Settings.EMPTY); exception = expectThrows(ElasticsearchException.class, () -> execute(indexUpdateSettingsRequest, diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index 
8887bed226ca1..0dc0a67cf7e16 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -53,6 +53,7 @@ import org.elasticsearch.action.support.master.MasterNodeReadRequest; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.action.support.replication.ReplicationRequest; +import org.elasticsearch.client.core.TermVectorsRequest; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.client.RequestConverters.EndpointBuilder; import org.elasticsearch.common.CheckedBiConsumer; @@ -1177,6 +1178,46 @@ public void testExplain() throws IOException { assertToXContentBody(explainRequest, request.getEntity()); } + public void testTermVectors() throws IOException { + String index = randomAlphaOfLengthBetween(3, 10); + String type = randomAlphaOfLengthBetween(3, 10); + String id = randomAlphaOfLengthBetween(3, 10); + TermVectorsRequest tvRequest = new TermVectorsRequest(index, type, id); + Map expectedParams = new HashMap<>(); + String[] fields; + if (randomBoolean()) { + String routing = randomAlphaOfLengthBetween(3, 10); + tvRequest.setRouting(routing); + expectedParams.put("routing", routing); + } + if (randomBoolean()) { + tvRequest.setRealtime(false); + expectedParams.put("realtime", "false"); + } + + boolean hasFields = randomBoolean(); + if (hasFields) { + fields = generateRandomStringArray(10, 5, false, false); + tvRequest.setFields(fields); + } + + Request request = RequestConverters.termVectors(tvRequest); + StringJoiner endpoint = new StringJoiner("/", "/", ""); + endpoint.add(index).add(type).add(id).add("_termvectors"); + + assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + assertEquals(endpoint.toString(), request.getEndpoint()); + if (hasFields) { + assertThat(request.getParameters(), hasKey("fields")); + String[] requestFields = 
Strings.splitStringByCommaToArray(request.getParameters().get("fields")); + assertArrayEquals(tvRequest.getFields(), requestFields); + } + for (Map.Entry param : expectedParams.entrySet()) { + assertThat(request.getParameters(), hasEntry(param.getKey(), param.getValue())); + } + assertToXContentBody(tvRequest, request.getEntity()); + } + public void testFieldCaps() { // Create a random request. String[] indices = randomIndicesNames(0, 5); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java index fda7ecdd6d6a2..9535043e395d1 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java @@ -661,8 +661,7 @@ public void testApiNamingConventions() throws Exception { "mtermvectors", "render_search_template", "scripts_painless_execute", - "tasks.get", - "termvectors" + "tasks.get" }; //These API are not required for high-level client feature completeness String[] notRequiredApi = new String[] { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java index e30c1b383a215..7a5f873d45cc7 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java @@ -46,7 +46,6 @@ import org.elasticsearch.client.rollup.job.config.MetricConfig; import org.elasticsearch.client.rollup.job.config.RollupJobConfig; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; @@ -169,9 
+168,7 @@ public void testDeleteMissingRollupJob() { public void testPutAndGetRollupJob() throws Exception { // TODO expand this to also test with histogram and terms? final GroupConfig groups = new GroupConfig(new DateHistogramGroupConfig("date", DateHistogramInterval.DAY)); - final List metrics = Arrays.asList( - new MetricConfig("value", SUPPORTED_METRICS), - new MetricConfig("date", Arrays.asList(MaxAggregationBuilder.NAME))); + final List metrics = Collections.singletonList(new MetricConfig("value", SUPPORTED_METRICS)); final TimeValue timeout = TimeValue.timeValueSeconds(randomIntBetween(30, 600)); PutRollupJobRequest putRollupJobRequest = @@ -199,28 +196,21 @@ public void testPutAndGetRollupJob() throws Exception { assertEquals(groups.getDateHistogram().getTimeZone(), source.get("date.date_histogram.time_zone")); for (MetricConfig metric : metrics) { - if (metric.getField().equals("value")) { - for (String name : metric.getMetrics()) { - Number value = (Number) source.get(metric.getField() + "." + name + ".value"); - if ("min".equals(name)) { - assertEquals(min, value.intValue()); - } else if ("max".equals(name)) { - assertEquals(max, value.intValue()); - } else if ("sum".equals(name)) { - assertEquals(sum, value.doubleValue(), 0.0d); - } else if ("avg".equals(name)) { - assertEquals(sum, value.doubleValue(), 0.0d); - Number avgCount = (Number) source.get(metric.getField() + "." + name + "._count"); - assertEquals(numDocs, avgCount.intValue()); - } else if ("value_count".equals(name)) { - assertEquals(numDocs, value.intValue()); - } + for (String name : metric.getMetrics()) { + Number value = (Number) source.get(metric.getField() + "." 
+ name + ".value"); + if ("min".equals(name)) { + assertEquals(min, value.intValue()); + } else if ("max".equals(name)) { + assertEquals(max, value.intValue()); + } else if ("sum".equals(name)) { + assertEquals(sum, value.doubleValue(), 0.0d); + } else if ("avg".equals(name)) { + assertEquals(sum, value.doubleValue(), 0.0d); + Number avgCount = (Number) source.get(metric.getField() + "." + name + "._count"); + assertEquals(numDocs, avgCount.intValue()); + } else if ("value_count".equals(name)) { + assertEquals(numDocs, value.intValue()); } - } else { - Number value = (Number) source.get(metric.getField() + ".max.value"); - assertEquals( - DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parser().parseDateTime("2018-01-01T00:59:50").getMillis(), - value.longValue()); } } }); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java index 063fce9bcac5e..f6aa97def28e4 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java @@ -1079,7 +1079,7 @@ public void testExplainNonExistent() throws IOException { assertThat(exception.status(), equalTo(RestStatus.NOT_FOUND)); assertThat(exception.getIndex().getName(), equalTo("non_existent_index")); assertThat(exception.getDetailedMessage(), - containsString("Elasticsearch exception [type=index_not_found_exception, reason=no such index]")); + containsString("Elasticsearch exception [type=index_not_found_exception, reason=no such index [non_existent_index]]")); } { ExplainRequest explainRequest = new ExplainRequest("index1", "doc", "999"); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/WatcherIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/WatcherIT.java index b1d3dc0103ca3..b069d211b2ee8 100644 --- 
a/client/rest-high-level/src/test/java/org/elasticsearch/client/WatcherIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/WatcherIT.java @@ -19,6 +19,10 @@ package org.elasticsearch.client; import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.client.watcher.DeactivateWatchRequest; +import org.elasticsearch.client.watcher.DeactivateWatchResponse; +import org.elasticsearch.client.watcher.ActivateWatchRequest; +import org.elasticsearch.client.watcher.ActivateWatchResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.watcher.AckWatchRequest; import org.elasticsearch.client.watcher.AckWatchResponse; @@ -73,6 +77,23 @@ private PutWatchResponse createWatch(String watchId) throws Exception { return highLevelClient().watcher().putWatch(putWatchRequest, RequestOptions.DEFAULT); } + public void testDeactivateWatch() throws Exception { + // Deactivate a watch that exists + String watchId = randomAlphaOfLength(10); + createWatch(watchId); + DeactivateWatchResponse response = highLevelClient().watcher().deactivateWatch( + new DeactivateWatchRequest(watchId), RequestOptions.DEFAULT); + assertThat(response.getStatus().state().isActive(), is(false)); + } + public void testDeactivateWatch404() throws Exception { + // Deactivate a watch that does not exist + String watchId = randomAlphaOfLength(10); + ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, + () -> highLevelClient().watcher().deactivateWatch(new DeactivateWatchRequest(watchId), RequestOptions.DEFAULT)); + assertEquals(RestStatus.NOT_FOUND, exception.status()); + + } + public void testDeleteWatch() throws Exception { // delete watch that exists { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/WatcherRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/WatcherRequestConvertersTests.java index 
1c422c2b8ec4c..df6f697fb975a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/WatcherRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/WatcherRequestConvertersTests.java @@ -22,6 +22,7 @@ import org.apache.http.client.methods.HttpDelete; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.client.watcher.DeactivateWatchRequest; import org.elasticsearch.client.watcher.ActivateWatchRequest; import org.elasticsearch.client.watcher.AckWatchRequest; import org.elasticsearch.client.watcher.StartWatchServiceRequest; @@ -83,6 +84,15 @@ public void testPutWatch() throws Exception { assertThat(bos.toString("UTF-8"), is(body)); } + public void testDeactivateWatch() { + String watchId = randomAlphaOfLength(10); + DeactivateWatchRequest deactivateWatchRequest = new DeactivateWatchRequest(watchId); + Request request = WatcherRequestConverters.deactivateWatch(deactivateWatchRequest); + + assertEquals(HttpPut.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/watcher/watch/" + watchId + "/_deactivate", request.getEndpoint()); + } + public void testDeleteWatch() { DeleteWatchRequest deleteWatchRequest = new DeleteWatchRequest(); String watchId = randomAlphaOfLength(10); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/core/TermVectorsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/TermVectorsResponseTests.java new file mode 100644 index 0000000000000..67b2704c58d29 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/TermVectorsResponseTests.java @@ -0,0 +1,203 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.core; + +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.ESTestCase; + +import java.util.ArrayList; +import java.util.List; +import java.io.IOException; +import java.util.Collections; +import java.util.Comparator; + +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; + +public class TermVectorsResponseTests extends ESTestCase { + + public void testFromXContent() throws IOException { + xContentTester( + this::createParser, + this::createTestInstance, + this::toXContent, + TermVectorsResponse::fromXContent) + .supportsUnknownFields(true) + .randomFieldsExcludeFilter(field -> + field.endsWith("term_vectors") || field.endsWith("terms") || field.endsWith("tokens")) + .test(); + } + + private void toXContent(TermVectorsResponse response, XContentBuilder builder) throws IOException { + builder.startObject(); + builder.field("_index", response.getIndex()); + builder.field("_type", response.getType()); + if (response.getId() != null) { + builder.field("_id", response.getId()); + } + builder.field("_version", response.getDocVersion()); + builder.field("found", response.getFound()); + builder.field("took", response.getTookInMillis()); + List termVectorList = response.getTermVectorsList(); + if (termVectorList != null) { + Collections.sort(termVectorList, 
Comparator.comparing(TermVectorsResponse.TermVector::getFieldName)); + builder.startObject("term_vectors"); + for (TermVectorsResponse.TermVector tv : termVectorList) { + toXContent(tv, builder); + } + builder.endObject(); + } + builder.endObject(); + } + + private void toXContent(TermVectorsResponse.TermVector tv, XContentBuilder builder) throws IOException { + builder.startObject(tv.getFieldName()); + // build fields_statistics + if (tv.getFieldStatistics() != null) { + builder.startObject("field_statistics"); + builder.field("sum_doc_freq", tv.getFieldStatistics().getSumDocFreq()); + builder.field("doc_count", tv.getFieldStatistics().getDocCount()); + builder.field("sum_ttf", tv.getFieldStatistics().getSumTotalTermFreq()); + builder.endObject(); + } + // build terms + List terms = tv.getTerms(); + if (terms != null) { + Collections.sort(terms, Comparator.comparing(TermVectorsResponse.TermVector.Term::getTerm)); + builder.startObject("terms"); + for (TermVectorsResponse.TermVector.Term term : terms) { + builder.startObject(term.getTerm()); + // build term_statistics + if (term.getDocFreq() != null) builder.field("doc_freq", term.getDocFreq()); + if (term.getTotalTermFreq() != null) builder.field("ttf", term.getTotalTermFreq()); + builder.field("term_freq", term.getTermFreq()); + + // build tokens + List tokens = term.getTokens(); + if (tokens != null) { + Collections.sort( + tokens, + Comparator.comparing(TermVectorsResponse.TermVector.Token::getPosition, Comparator.nullsFirst(Integer::compareTo)) + .thenComparing(TermVectorsResponse.TermVector.Token::getStartOffset, Comparator.nullsFirst(Integer::compareTo)) + .thenComparing(TermVectorsResponse.TermVector.Token::getEndOffset, Comparator.nullsFirst(Integer::compareTo)) + ); + builder.startArray("tokens"); + for (TermVectorsResponse.TermVector.Token token : tokens) { + builder.startObject(); + if (token.getPosition() != null) builder.field("position", token.getPosition()); + if (token.getStartOffset()!= null) 
builder.field("start_offset", token.getStartOffset()); + if (token.getEndOffset() != null) builder.field("end_offset", token.getEndOffset()); + if (token.getPayload() != null) builder.field("payload", token.getPayload()); + builder.endObject(); + } + builder.endArray(); + } + if (term.getScore() != null) builder.field("score", term.getScore()); + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + } + + + protected TermVectorsResponse createTestInstance() { + String index = randomAlphaOfLength(5); + String type = randomAlphaOfLength(5); + String id = String.valueOf(randomIntBetween(1,100)); + long version = randomNonNegativeLong(); + long tookInMillis = randomNonNegativeLong(); + boolean found = randomBoolean(); + List tvList = null; + if (found == true){ + boolean hasFieldStatistics = randomBoolean(); + boolean hasTermStatistics = randomBoolean(); + boolean hasScores = randomBoolean(); + boolean hasOffsets = randomBoolean(); + boolean hasPositions = randomBoolean(); + boolean hasPayloads = randomBoolean(); + int fieldsCount = randomIntBetween(1, 3); + tvList = new ArrayList<>(fieldsCount); + for (int i = 0; i < fieldsCount; i++) { + tvList.add(randomTermVector(hasFieldStatistics, hasTermStatistics, hasScores, hasOffsets, hasPositions, hasPayloads)); + } + } + TermVectorsResponse tvresponse = new TermVectorsResponse(index, type, id, version, found, tookInMillis, tvList); + return tvresponse; + } + + private TermVectorsResponse.TermVector randomTermVector(boolean hasFieldStatistics, boolean hasTermStatistics, boolean hasScores, + boolean hasOffsets, boolean hasPositions, boolean hasPayloads) { + TermVectorsResponse.TermVector.FieldStatistics fs = null; + if (hasFieldStatistics) { + long sumDocFreq = randomNonNegativeLong(); + int docCount = randomInt(1000); + long sumTotalTermFreq = randomNonNegativeLong(); + fs = new TermVectorsResponse.TermVector.FieldStatistics(sumDocFreq, docCount, sumTotalTermFreq); + } + + int termsCount = 
randomIntBetween(1, 5); + List terms = new ArrayList<>(termsCount); + for (int i = 0; i < termsCount; i++) { + terms.add(randomTerm(hasTermStatistics, hasScores, hasOffsets, hasPositions, hasPayloads)); + } + + TermVectorsResponse.TermVector tv = new TermVectorsResponse.TermVector("field" + randomAlphaOfLength(2), fs, terms); + return tv; + } + + private TermVectorsResponse.TermVector.Term randomTerm(boolean hasTermStatistics, boolean hasScores, + boolean hasOffsets, boolean hasPositions, boolean hasPayloads) { + + String termTxt = "term" + randomAlphaOfLength(2); + int termFreq = randomInt(10000); + Integer docFreq = null; + Long totalTermFreq = null; + Float score = null; + List tokens = null; + if (hasTermStatistics) { + docFreq = randomInt(1000); + totalTermFreq = randomNonNegativeLong(); + } + if (hasScores) score = randomFloat(); + if (hasOffsets || hasPositions || hasPayloads ){ + int tokensCount = randomIntBetween(1, 5); + tokens = new ArrayList<>(tokensCount); + for (int i = 0; i < tokensCount; i++) { + Integer startOffset = null; + Integer endOffset = null; + Integer position = null; + String payload = null; + if (hasOffsets) { + startOffset = randomInt(1000); + endOffset = randomInt(2000); + } + if (hasPositions) position = randomInt(100); + if (hasPayloads) payload = "payload" + randomAlphaOfLength(2); + TermVectorsResponse.TermVector.Token token = + new TermVectorsResponse.TermVector.Token(startOffset, endOffset, position, payload); + tokens.add(token); + } + } + TermVectorsResponse.TermVector.Term term = + new TermVectorsResponse.TermVector.Term(termTxt, termFreq, docFreq, totalTermFreq, score, tokens); + return term; + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java index 4e3f778cd151b..7f3b980becd8d 100644 --- 
a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java @@ -25,6 +25,8 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.bulk.BackoffPolicy; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkProcessor; @@ -52,6 +54,8 @@ import org.elasticsearch.client.Response; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.client.RethrottleRequest; +import org.elasticsearch.client.core.TermVectorsRequest; +import org.elasticsearch.client.core.TermVectorsResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; @@ -1503,6 +1507,125 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure) } } + // Not entirely sure if _termvectors belongs to CRUD, and in the absence of a better place, will have it here + public void testTermVectors() throws Exception { + RestHighLevelClient client = highLevelClient(); + CreateIndexRequest authorsRequest = new CreateIndexRequest("authors").mapping("doc", "user", "type=keyword"); + CreateIndexResponse authorsResponse = client.indices().create(authorsRequest, RequestOptions.DEFAULT); + assertTrue(authorsResponse.isAcknowledged()); + client.index(new IndexRequest("index", "doc", "1").source("user", "kimchy"), RequestOptions.DEFAULT); + Response refreshResponse = client().performRequest(new Request("POST", "/authors/_refresh")); + assertEquals(200, 
refreshResponse.getStatusLine().getStatusCode()); + + { + // tag::term-vectors-request + TermVectorsRequest request = new TermVectorsRequest("authors", "doc", "1"); + request.setFields("user"); + // end::term-vectors-request + } + + { + // tag::term-vectors-request-artificial + TermVectorsRequest request = new TermVectorsRequest("authors", "doc"); + XContentBuilder docBuilder = XContentFactory.jsonBuilder(); + docBuilder.startObject().field("user", "guest-user").endObject(); + request.setDoc(docBuilder); // <1> + // end::term-vectors-request-artificial + + // tag::term-vectors-request-optional-arguments + request.setFieldStatistics(false); // <1> + request.setTermStatistics(true); // <2> + request.setPositions(false); // <3> + request.setOffsets(false); // <4> + request.setPayloads(false); // <5> + + Map filterSettings = new HashMap<>(); + filterSettings.put("max_num_terms", 3); + filterSettings.put("min_term_freq", 1); + filterSettings.put("max_term_freq", 10); + filterSettings.put("min_doc_freq", 1); + filterSettings.put("max_doc_freq", 100); + filterSettings.put("min_word_length", 1); + filterSettings.put("max_word_length", 10); + + request.setFilterSettings(filterSettings); // <6> + + Map perFieldAnalyzer = new HashMap<>(); + perFieldAnalyzer.put("user", "keyword"); + request.setPerFieldAnalyzer(perFieldAnalyzer); // <7> + + request.setRealtime(false); // <8> + request.setRouting("routing"); // <9> + // end::term-vectors-request-optional-arguments + } + + TermVectorsRequest request = new TermVectorsRequest("authors", "doc", "1"); + request.setFields("user"); + + // tag::term-vectors-execute + TermVectorsResponse response = client.termvectors(request, RequestOptions.DEFAULT); + // end::term-vectors-execute + + + // tag::term-vectors-response + String index = response.getIndex(); // <1> + String type = response.getType(); // <2> + String id = response.getId(); // <3> + boolean found = response.getFound(); // <4> + // end::term-vectors-response + + // 
tag::term-vectors-term-vectors + if (response.getTermVectorsList() != null) { + List tvList = response.getTermVectorsList(); + for (TermVectorsResponse.TermVector tv : tvList) { + String fieldname = tv.getFieldName(); // <1> + int docCount = tv.getFieldStatistics().getDocCount(); // <2> + long sumTotalTermFreq = tv.getFieldStatistics().getSumTotalTermFreq(); // <3> + long sumDocFreq = tv.getFieldStatistics().getSumDocFreq(); // <4> + if (tv.getTerms() != null) { + List terms = tv.getTerms(); // <5> + for (TermVectorsResponse.TermVector.Term term : terms) { + String termStr = term.getTerm(); // <6> + int termFreq = term.getTermFreq(); // <7> + int docFreq = term.getDocFreq(); // <8> + long totalTermFreq = term.getTotalTermFreq(); // <9> + float score = term.getScore(); // <10> + if (term.getTokens() != null) { + List tokens = term.getTokens(); // <11> + for (TermVectorsResponse.TermVector.Token token : tokens) { + int position = token.getPosition(); // <12> + int startOffset = token.getStartOffset(); // <13> + int endOffset = token.getEndOffset(); // <14> + String payload = token.getPayload(); // <15> + } + } + } + } + } + } + // end::term-vectors-term-vectors + + // tag::term-vectors-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(TermVectorsResponse termVectorsResponse) { + // <1> + } + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::term-vectors-execute-listener + CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + // tag::term-vectors-execute-async + client.termvectorsAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::term-vectors-execute-async + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + + } + @SuppressWarnings("unused") public void testMultiGet() throws Exception { RestHighLevelClient client = highLevelClient(); @@ -1592,7 +1715,7 @@ public void testMultiGet() throws Exception { // TODO status is 
broken! fix in a followup // assertEquals(RestStatus.NOT_FOUND, ee.status()); // <4> assertThat(e.getMessage(), - containsString("reason=no such index")); // <5> + containsString("reason=no such index [missing_index]")); // <5> // end::multi-get-indexnotfound // tag::multi-get-execute-listener diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/WatcherDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/WatcherDocumentationIT.java index b9562754e9168..165bda95dfc3d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/WatcherDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/WatcherDocumentationIT.java @@ -32,6 +32,8 @@ import org.elasticsearch.client.watcher.AckWatchResponse; import org.elasticsearch.client.watcher.ActionStatus; import org.elasticsearch.client.watcher.ActionStatus.AckStatus; +import org.elasticsearch.client.watcher.DeactivateWatchRequest; +import org.elasticsearch.client.watcher.DeactivateWatchResponse; import org.elasticsearch.client.watcher.StartWatchServiceRequest; import org.elasticsearch.client.watcher.StopWatchServiceRequest; import org.elasticsearch.client.watcher.WatchStatus; @@ -47,6 +49,8 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import static org.hamcrest.Matchers.is; + public class WatcherDocumentationIT extends ESRestHighLevelClientTestCase { public void testStartStopWatchService() throws Exception { @@ -297,6 +301,57 @@ public void onFailure(Exception e) { } } + public void testDeactivateWatch() throws Exception { + RestHighLevelClient client = highLevelClient(); + + { + BytesReference watch = new BytesArray("{ \n" + + " \"trigger\": { \"schedule\": { \"interval\": \"10h\" } },\n" + + " \"input\": { \"simple\": { \"foo\" : \"bar\" } },\n" + + " \"actions\": { \"logme\": { \"logging\": { \"text\": \"{{ctx.payload}}\" } } 
}\n" + + "}"); + PutWatchRequest putWatchRequest = new PutWatchRequest("my_watch_id", watch, XContentType.JSON); + client.watcher().putWatch(putWatchRequest, RequestOptions.DEFAULT); + } + + { + //tag::deactivate-watch-execute + DeactivateWatchRequest request = new DeactivateWatchRequest("my_watch_id"); + DeactivateWatchResponse response = client.watcher().deactivateWatch(request, RequestOptions.DEFAULT); + //end::deactivate-watch-execute + + assertThat(response.getStatus().state().isActive(), is(false)); + } + + { + DeactivateWatchRequest request = new DeactivateWatchRequest("my_watch_id"); + // tag::deactivate-watch-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(DeactivateWatchResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::deactivate-watch-execute-listener + + // For testing, replace the empty listener by a blocking listener. + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::deactivate-watch-execute-async + client.watcher().deactivateWatchAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::deactivate-watch-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + + public void testActivateWatch() throws Exception { RestHighLevelClient client = highLevelClient(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/DeactivateWatchRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/DeactivateWatchRequestTests.java new file mode 100644 index 0000000000000..d92a51f96c26a --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/DeactivateWatchRequestTests.java @@ -0,0 +1,41 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.watcher; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.is; + +public class DeactivateWatchRequestTests extends ESTestCase { + + public void testNullId() { + NullPointerException actual = expectThrows(NullPointerException.class, () -> new DeactivateWatchRequest(null)); + assertNotNull(actual); + assertThat(actual.getMessage(), is("watch id is missing")); + } + + public void testInvalidId() { + IllegalArgumentException actual = expectThrows(IllegalArgumentException.class, + () -> new DeactivateWatchRequest("Watch id has spaces")); + assertNotNull(actual); + assertThat(actual.getMessage(), is("watch id contains whitespace")); + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/DeactivateWatchResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/DeactivateWatchResponseTests.java new file mode 100644 index 0000000000000..dd56c8b054e64 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/DeactivateWatchResponseTests.java @@ -0,0 +1,58 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.watcher; + + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +public class DeactivateWatchResponseTests extends ESTestCase { + + public void testBasicParsing() throws IOException { + XContentType contentType = randomFrom(XContentType.values()); + int version = randomInt(); + ExecutionState executionState = randomFrom(ExecutionState.values()); + XContentBuilder builder = XContentFactory.contentBuilder(contentType).startObject() + .startObject("status") + .field("version", version) + .field("execution_state", executionState) + .endObject() + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + DeactivateWatchResponse response = parse(contentType, bytes); + WatchStatus status = response.getStatus(); + assertNotNull(status); + assertEquals(version, status.version()); + assertEquals(executionState, status.getExecutionState()); + } + + private 
DeactivateWatchResponse parse(XContentType contentType, BytesReference bytes) throws IOException { + XContentParser parser = XContentFactory.xContent(contentType) + .createParser(NamedXContentRegistry.EMPTY, null, bytes.streamInput()); + parser.nextToken(); + return DeactivateWatchResponse.fromXContent(parser); + } +} diff --git a/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java b/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java index 90801715b7e20..9191f5025581b 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java +++ b/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java @@ -36,7 +36,7 @@ import org.apache.http.ssl.SSLContextBuilder; import org.apache.http.ssl.SSLContexts; import org.apache.http.util.EntityUtils; -import org.elasticsearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory; +import org.elasticsearch.client.HttpAsyncResponseConsumerFactory; import org.elasticsearch.client.Node; import org.elasticsearch.client.NodeSelector; import org.elasticsearch.client.Request; @@ -84,7 +84,8 @@ public class RestClientDocumentation { RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder(); builder.addHeader("Authorization", "Bearer " + TOKEN); // <1> builder.setHttpAsyncResponseConsumerFactory( // <2> - new HeapBufferedResponseConsumerFactory(30 * 1024 * 1024 * 1024)); + new HttpAsyncResponseConsumerFactory + .HeapBufferedResponseConsumerFactory(30 * 1024 * 1024 * 1024)); COMMON_OPTIONS = builder.build(); } // end::rest-client-options-singleton diff --git a/docs/build.gradle b/docs/build.gradle index 0463d6bf22305..580e3a64ada97 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -61,17 +61,7 @@ integTestCluster { systemProperty 'es.scripting.update.ctx_in_params', 'false' } -// remove when 
https://github.com/elastic/elasticsearch/issues/31305 is fixed -if (rootProject.ext.compilerJavaVersion.isJava11()) { - integTestRunner { - systemProperty 'tests.rest.blacklist', [ - 'plugins/ingest-attachment/line_164', - 'plugins/ingest-attachment/line_117' - ].join(',') - } -} -// Build the cluster with all plugins - +// build the cluster with all plugins project.rootProject.subprojects.findAll { it.parent.path == ':plugins' }.each { subproj -> /* Skip repositories. We just aren't going to be able to test them so it * doesn't make sense to waste time installing them. */ diff --git a/docs/java-rest/high-level/document/term-vectors.asciidoc b/docs/java-rest/high-level/document/term-vectors.asciidoc new file mode 100644 index 0000000000000..e739e37732fd4 --- /dev/null +++ b/docs/java-rest/high-level/document/term-vectors.asciidoc @@ -0,0 +1,101 @@ +-- +:api: term-vectors +:request: TermVectorsRequest +:response: TermVectorsResponse +-- + +[id="{upid}-{api}"] +=== Term Vectors API + +Term Vectors API returns information and statistics on terms in the fields +of a particular document. The document could be stored in the index or +artificially provided by the user. + + +[id="{upid}-{api}-request"] +==== Term Vectors Request + +A +{request}+ expects an `index`, a `type` and an `id` to specify +a certain document, and fields for which the information is retrieved. 
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- + +Term vectors can also be generated for artificial documents, that is for +documents not present in the index: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request-artificial] +-------------------------------------------------- +<1> An artificial document is provided as an `XContentBuilder` object, +the Elasticsearch built-in helper to generate JSON content. + +===== Optional arguments + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request-optional-arguments] +-------------------------------------------------- +<1> Set `fieldStatistics` to `false` (default is `true`) to omit document count, +sum of document frequencies, sum of total term frequencies. +<2> Set `termStatistics` to `true` (default is `false`) to display +total term frequency and document frequency. +<3> Set `positions` to `false` (default is `true`) to omit the output of +positions. +<4> Set `offsets` to `false` (default is `true`) to omit the output of +offsets. +<5> Set `payloads` to `false` (default is `true`) to omit the output of +payloads. +<6> Set `filterSettings` to filter the terms that can be returned based +on their tf-idf scores. +<7> Set `perFieldAnalyzer` to specify a different analyzer than +the one that the field has. +<8> Set `realtime` to `false` (default is `true`) to retrieve term vectors +near realtime. 
+<9> Set a routing parameter + + +include::../execution.asciidoc[] + + +[id="{upid}-{api}-response"] +==== TermVectorsResponse + +The `TermVectorsResponse` contains the following information: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> The index name of the document. +<2> The type name of the document. +<3> The id of the document. +<4> Indicates whether or not the document found. + + +===== Inspecting Term Vectors +If `TermVectorsResponse` contains non-null list of term vectors, +more information about each term vector can be obtained using the following: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-term-vectors] +-------------------------------------------------- +<1> The name of the current field +<2> Fields statistics for the current field - document count +<3> Fields statistics for the current field - sum of total term frequencies +<4> Fields statistics for the current field - sum of document frequencies +<5> Terms for the current field +<6> The name of the term +<7> Term frequency of the term +<8> Document frequency of the term +<9> Total term frequency of the term +<10> Score of the term +<11> Tokens of the term +<12> Position of the token +<13> Start offset of the token +<14> End offset of the token +<15> Payload of the token diff --git a/docs/java-rest/high-level/rollup/put_job.asciidoc b/docs/java-rest/high-level/rollup/put_job.asciidoc index 50b05189b1375..0b7ece05ca89b 100644 --- a/docs/java-rest/high-level/rollup/put_job.asciidoc +++ b/docs/java-rest/high-level/rollup/put_job.asciidoc @@ -119,68 +119,6 @@ include-tagged::{doc-tests}/RollupDocumentationIT.java[x-pack-rollup-put-rollup- <2> Adds the metrics to compute on the `temperature` field <3> Adds the metrics to compute on the 
`voltage` field -By default, metrics `min`/`max` for the fields in `DateHistogramGroupConfig` and -`HistogramGroupConfig` are added to the configuration unless the user already provided -metrics for those fields. - -So, for the following configuration: - -[source,js] --------------------------------------------------- -"groups" : { - "date_histogram": { - "field": "timestamp", - "interval": "1h", - "delay": "7d", - "time_zone": "UTC" - }, - "terms": { - "fields": ["hostname", "datacenter"] - }, - "histogram": { - "fields": ["load", "net_in", "net_out"], - "interval": 5 - }, -}, -"metrics": [ - { - "field": "load", - "metrics": ["max"] - }, - { - "field": "net_in", - "metrics": ["max"] - } -] --------------------------------------------------- -// NOTCONSOLE - -The following will be the metrics in the configuration after -the defaults are added server side. Note the default metrics -provided for the fields `timestamp` and `net_out` - -[source,js] --------------------------------------------------- -"metrics": [ - { - "field": "load", - "metrics": ["max"] - }, - { - "field": "net_in", - "metrics": ["max"] - }, - { - "field": "timestamp", - "metrics": ["min", "max"] - }, - { - "field": "net_out", - "metrics": ["min", "max"] - } -] --------------------------------------------------- -// NOTCONSOLE [[java-rest-high-x-pack-rollup-put-rollup-job-execution]] ==== Execution diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 6cde79a22e5c1..c8d16954abe2a 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -14,6 +14,7 @@ Single document APIs:: * <<{upid}-exists>> * <<{upid}-delete>> * <<{upid}-update>> +* <<{upid}-term-vectors>> [[multi-doc]] Multi-document APIs:: @@ -29,6 +30,7 @@ include::document/get.asciidoc[] include::document/exists.asciidoc[] include::document/delete.asciidoc[] include::document/update.asciidoc[] 
+include::document/term-vectors.asciidoc[] include::document/bulk.asciidoc[] include::document/multi-get.asciidoc[] include::document/reindex.asciidoc[] @@ -344,6 +346,7 @@ The Java High Level REST Client supports the following Watcher APIs: * <<{upid}-stop-watch-service>> * <> * <> +* <> * <<{upid}-ack-watch>> * <<{upid}-activate-watch>> @@ -352,6 +355,7 @@ include::watcher/stop-watch-service.asciidoc[] include::watcher/put-watch.asciidoc[] include::watcher/delete-watch.asciidoc[] include::watcher/ack-watch.asciidoc[] +include::watcher/deactivate-watch.asciidoc[] include::watcher/activate-watch.asciidoc[] == Graph APIs @@ -372,4 +376,4 @@ don't leak into the rest of the documentation. :response!: :doc-tests-file!: :upid!: --- +-- \ No newline at end of file diff --git a/docs/java-rest/high-level/watcher/deactivate-watch.asciidoc b/docs/java-rest/high-level/watcher/deactivate-watch.asciidoc new file mode 100644 index 0000000000000..673423b69b983 --- /dev/null +++ b/docs/java-rest/high-level/watcher/deactivate-watch.asciidoc @@ -0,0 +1,10 @@ +-- +:api: deactivate-watch +:request: deactivateWatchRequet +:response: deactivateWatchResponse +:doc-tests-file: {doc-tests}/WatcherDocumentationIT.java +-- +[[java-rest-high-watcher-deactivate-watch]] +=== Deactivate Watch API + +include::../execution.asciidoc[] diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index 81bc96bb8f92e..1442f0e3e558e 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -234,6 +234,13 @@ specific index module: The length of time that a <> remains available for <>. Defaults to `60s`. + `index.default_pipeline`:: + + The default <> pipeline for this index. Index requests will fail + if the default pipeline is set and the pipeline does not exist. The default may be + overridden using the `pipeline` parameter. The special pipeline name `_none` indicates + no ingest pipeline should be run. 
+ [float] === Settings in other index modules diff --git a/docs/reference/indices/templates.asciidoc b/docs/reference/indices/templates.asciidoc index 051e78fc44297..e70f461d3bc23 100644 --- a/docs/reference/indices/templates.asciidoc +++ b/docs/reference/indices/templates.asciidoc @@ -46,9 +46,9 @@ PUT _template/template_1 NOTE: Index templates provide C-style /* */ block comments. Comments are allowed everywhere in the JSON document except before the initial opening curly bracket. -Defines a template named `template_1`, with a template pattern of `te*`. +Defines a template named `template_1`, with a template pattern of `te*` or `bar*`. The settings and mappings will be applied to any index name that matches -the `te*` pattern. +the `te*` or `bar*` pattern. It is also possible to include aliases in an index template as follows: diff --git a/docs/reference/ingest.asciidoc b/docs/reference/ingest.asciidoc index 772013534b63b..6fa2e8c796df6 100644 --- a/docs/reference/ingest.asciidoc +++ b/docs/reference/ingest.asciidoc @@ -35,6 +35,9 @@ PUT my-index/_doc/my-id?pipeline=my_pipeline_id // CONSOLE // TEST[catch:bad_request] +An index may also declare a <> that will be used in the +absence of the `pipeline` parameter. + See <> for more information about creating, adding, and deleting pipelines. -- diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc index bad758c8a3c0d..eeb914facc2c6 100644 --- a/docs/reference/ingest/ingest-node.asciidoc +++ b/docs/reference/ingest/ingest-node.asciidoc @@ -776,16 +776,16 @@ Accepts a single value or an array of values. [options="header"] |====== | Name | Required | Default | Description -| `field` | yes | - | The field to be appended to -| `value` | yes | - | The value to be appended +| `field` | yes | - | The field to be appended to. Supports <>. +| `value` | yes | - | The value to be appended. Supports <>. 
|====== [source,js] -------------------------------------------------- { "append": { - "field": "field1", - "value": ["item2", "item3", "item4"] + "field": "tags", + "value": ["production", "{{app}}", "{{owner}}"] } } -------------------------------------------------- @@ -812,7 +812,7 @@ the field is not a supported format or resultant value exceeds 2^63. -------------------------------------------------- { "bytes": { - "field": "foo" + "field": "file.size" } } -------------------------------------------------- @@ -850,7 +850,7 @@ still be updated with the unconverted field value. -------------------------------------------------- { "convert": { - "field" : "foo", + "field" : "url.port", "type": "integer" } } @@ -874,8 +874,8 @@ in the same order they were defined as part of the processor definition. | `field` | yes | - | The field to get the date from. | `target_field` | no | @timestamp | The field that will hold the parsed date. | `formats` | yes | - | An array of the expected date formats. Can be a Joda pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N. -| `timezone` | no | UTC | The timezone to use when parsing the date. -| `locale` | no | ENGLISH | The locale to use when parsing the date, relevant when parsing month names or week days. +| `timezone` | no | UTC | The timezone to use when parsing the date. Supports <>. +| `locale` | no | ENGLISH | The locale to use when parsing the date, relevant when parsing month names or week days. Supports <>. |====== Here is an example that adds the parsed date to the `timestamp` field based on the `initial_date` field: @@ -913,8 +913,8 @@ the timezone and locale values. "field" : "initial_date", "target_field" : "timestamp", "formats" : ["ISO8601"], - "timezone" : "{{ my_timezone }}", - "locale" : "{{ my_locale }}" + "timezone" : "{{my_timezone}}", + "locale" : "{{my_locale}}" } } ] @@ -1059,12 +1059,12 @@ understands this to mean `2016-04-01` as is explained in the <>. 
+| `date_rounding` | yes | - | How to round the date when formatting the date into the index name. Valid values are: `y` (year), `M` (month), `w` (week), `d` (day), `h` (hour), `m` (minute) and `s` (second). Supports <>. | `date_formats` | no | yyyy-MM-dd'T'HH:mm:ss.SSSZ | An array of the expected date formats for parsing dates / timestamps in the document being preprocessed. Can be a Joda pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N. | `timezone` | no | UTC | The timezone to use when parsing the date and when date math index supports resolves expressions into concrete index names. | `locale` | no | ENGLISH | The locale to use when parsing the date from the document being preprocessed, relevant when parsing month names or week days. -| `index_name_format` | no | yyyy-MM-dd | The format to be used when printing the parsed date into the index name. An valid Joda pattern is expected here. +| `index_name_format` | no | yyyy-MM-dd | The format to be used when printing the parsed date into the index name. An valid Joda pattern is expected here. Supports <>. |====== [[dissect-processor]] @@ -1260,6 +1260,21 @@ Reference key modifier example * error = REFUSED |====== +[[drop-processor]] +=== Drop Processor +Drops the document without raising any errors. This is useful to prevent the document from +getting indexed based on some condition. + +[source,js] +-------------------------------------------------- +{ + "drop": { + "if" : "ctx.network_name == 'Guest'" + } +} +-------------------------------------------------- +// NOTCONSOLE + [[dot-expand-processor]] === Dot Expander Processor @@ -1390,14 +1405,15 @@ to the requester. [options="header"] |====== | Name | Required | Default | Description -| `message` | yes | - | The error message of the `FailException` thrown by the processor +| `message` | yes | - | The error message thrown by the processor. Supports <>. 
|====== [source,js] -------------------------------------------------- { "fail": { - "message": "an error message" + "if" : "ctx.tags.contains('production') != true", + "message": "The production tag is not present, found tags: {{tags}}" } } -------------------------------------------------- @@ -2093,6 +2109,120 @@ Converts a string to its lowercase equivalent. -------------------------------------------------- // NOTCONSOLE +[[pipeline-processor]] +=== Pipeline Processor +Executes another pipeline. + +[[pipeline-options]] +.Pipeline Options +[options="header"] +|====== +| Name | Required | Default | Description +| `name` | yes | - | The name of the pipeline to execute +|====== + +[source,js] +-------------------------------------------------- +{ + "pipeline": { + "name": "inner-pipeline" + } +} +-------------------------------------------------- +// NOTCONSOLE + +An example of using this processor for nesting pipelines would be: + +Define an inner pipeline: + +[source,js] +-------------------------------------------------- +PUT _ingest/pipeline/pipelineA +{ + "description" : "inner pipeline", + "processors" : [ + { + "set" : { + "field": "inner_pipeline_set", + "value": "inner" + } + } + ] +} +-------------------------------------------------- +// CONSOLE + +Define another pipeline that uses the previously defined inner pipeline: + +[source,js] +-------------------------------------------------- +PUT _ingest/pipeline/pipelineB +{ + "description" : "outer pipeline", + "processors" : [ + { + "pipeline" : { + "name": "pipelineA" + } + }, + { + "set" : { + "field": "outer_pipeline_set", + "value": "outer" + } + } + ] +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +Now indexing a document while applying the outer pipeline will see the inner pipeline executed +from the outer pipeline: + +[source,js] +-------------------------------------------------- +PUT /myindex/_doc/1?pipeline=pipelineB +{ + "field": "value" +} 
+-------------------------------------------------- +// CONSOLE +// TEST[continued] + +Response from the index request: + +[source,js] +-------------------------------------------------- +{ + "_index": "myindex", + "_type": "_doc", + "_id": "1", + "_version": 1, + "result": "created", + "_shards": { + "total": 2, + "successful": 1, + "failed": 0 + }, + "_seq_no": 0, + "_primary_term": 1, +} +-------------------------------------------------- +// TESTRESPONSE + +Indexed document: + +[source,js] +-------------------------------------------------- +{ + "field": "value", + "inner_pipeline_set": "inner", + "outer_pipeline_set": "outer" +} +-------------------------------------------------- +// NOTCONSOLE + [[remove-processor]] === Remove Processor Removes existing fields. If one field doesn't exist, an exception will be thrown. @@ -2102,7 +2232,7 @@ Removes existing fields. If one field doesn't exist, an exception will be thrown [options="header"] |====== | Name | Required | Default | Description -| `field` | yes | - | Fields to be removed +| `field` | yes | - | Fields to be removed. Supports <>. | `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document |====== @@ -2112,7 +2242,7 @@ Here is an example to remove a single field: -------------------------------------------------- { "remove": { - "field": "foo" + "field": "user_agent" } } -------------------------------------------------- @@ -2124,7 +2254,7 @@ To remove multiple fields, you can use the following query: -------------------------------------------------- { "remove": { - "field": ["foo", "bar"] + "field": ["user_agent", "url"] } } -------------------------------------------------- @@ -2138,18 +2268,18 @@ Renames an existing field. 
If the field doesn't exist or the new name is already .Rename Options [options="header"] |====== -| Name | Required | Default | Description -| `field` | yes | - | The field to be renamed -| `target_field` | yes | - | The new name of the field -| `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document +| Name | Required | Default | Description +| `field` | yes | - | The field to be renamed. Supports <>. +| `target_field` | yes | - | The new name of the field. Supports <>. +| `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document |====== [source,js] -------------------------------------------------- { "rename": { - "field": "foo", - "target_field": "foobar" + "field": "provider", + "target_field": "cloud.provider" } } -------------------------------------------------- @@ -2267,18 +2397,18 @@ its value will be replaced with the provided one. .Set Options [options="header"] |====== -| Name | Required | Default | Description -| `field` | yes | - | The field to insert, upsert, or update -| `value` | yes | - | The value to be set for the field -| `override`| no | true | If processor will update fields with pre-existing non-null-valued field. When set to `false`, such fields will not be touched. +| Name | Required | Default | Description +| `field` | yes | - | The field to insert, upsert, or update. Supports <>. +| `value` | yes | - | The value to be set for the field. Supports <>. +| `override` | no | true | If processor will update fields with pre-existing non-null-valued field. When set to `false`, such fields will not be touched. |====== [source,js] -------------------------------------------------- { "set": { - "field": "field1", - "value": 582.1 + "field": "host.os.name", + "value": "{{os}}" } } -------------------------------------------------- @@ -2331,7 +2461,7 @@ Throws an error when the field is not an array. 
-------------------------------------------------- { "sort": { - "field": "field_to_sort", + "field": "array_field_to_sort", "order": "desc" } } diff --git a/docs/reference/migration/migrate_7_0/cluster.asciidoc b/docs/reference/migration/migrate_7_0/cluster.asciidoc index d518d29987d3b..7343154175b22 100644 --- a/docs/reference/migration/migrate_7_0/cluster.asciidoc +++ b/docs/reference/migration/migrate_7_0/cluster.asciidoc @@ -18,3 +18,10 @@ primary shards of the opened index to be allocated. [float] ==== Shard preferences `_primary`, `_primary_first`, `_replica`, and `_replica_first` are removed These shard preferences are removed in favour of the `_prefer_nodes` and `_only_nodes` preferences. + +[float] +==== Cluster-wide shard soft limit +Clusters now have soft limits on the total number of open shards in the cluster +based on the number of nodes and the `cluster.max_shards_per_node` cluster +setting, to prevent accidental operations that would destabilize the cluster. +More information can be found in the <>. \ No newline at end of file diff --git a/docs/reference/migration/migrate_7_0/java.asciidoc b/docs/reference/migration/migrate_7_0/java.asciidoc index 7d68ff2fb5737..4357b3fa72857 100644 --- a/docs/reference/migration/migrate_7_0/java.asciidoc +++ b/docs/reference/migration/migrate_7_0/java.asciidoc @@ -22,6 +22,12 @@ appropriate request directly. * All classes present in `org.elasticsearch.search.aggregations.metrics.*` packages were moved to a single `org.elasticsearch.search.aggregations.metrics` package. +* All classes present in `org.elasticsearch.search.aggregations.pipeline.*` packages +were moved to a single `org.elasticsearch.search.aggregations.pipeline` package. 
In +addition, `org.elasticsearch.search.aggregations.pipeline.PipelineAggregationBuilders` +was moved to `org.elasticsearch.search.aggregations.PipelineAggregationBuilders` + + [float] ==== `Retry.withBackoff` methods with `Settings` removed diff --git a/docs/reference/modules/cluster/misc.asciidoc b/docs/reference/modules/cluster/misc.asciidoc index 3f12bd255de75..f397c3075b711 100644 --- a/docs/reference/modules/cluster/misc.asciidoc +++ b/docs/reference/modules/cluster/misc.asciidoc @@ -22,6 +22,48 @@ user with access to the <> API can make the cluster read-write again. +[[cluster-shard-limit]] + +==== Cluster Shard Limit + +In Elasticsearch 7.0 and later, there will be a soft limit on the number of +shards in a cluster, based on the number of nodes in the cluster. This is +intended to prevent operations which may unintentionally destabilize the +cluster. Prior to 7.0, actions which would result in the cluster going over the +limit will issue a deprecation warning. + +NOTE: You can set the system property `es.enforce_max_shards_per_node` to `true` +to opt in to strict enforcement of the shard limit. If this system property is +set, actions which would result in the cluster going over the limit will result +in an error, rather than a deprecation warning. This property will be removed in +Elasticsearch 7.0, as strict enforcement of the limit will be the default and +only behavior. + +If an operation, such as creating a new index, restoring a snapshot of an index, +or opening a closed index would lead to the number of shards in the cluster +going over this limit, the operation will issue a deprecation warning. + +If the cluster is already over the limit, due to changes in node membership or +setting changes, all operations that create or open indices will issue warnings +until either the limit is increased as described below, or some indices are +<> or <> to bring the +number of shards below the limit. 
+ +Replicas count towards this limit, but closed indexes do not. An index with 5 +primary shards and 2 replicas will be counted as 15 shards. Any closed index +is counted as 0, no matter how many shards and replicas it contains. + +The limit defaults to 1,000 shards per node, and can be dynamically adjusted using +the following property: + +`cluster.max_shards_per_node`:: + + Controls the number of shards allowed in the cluster per node. + +For example, a 3-node cluster with the default setting would allow 3,000 shards +total, across all open indexes. If the above setting is changed to 1,500, then +the cluster would allow 4,500 shards total. + [[user-defined-data]] ==== User Defined Cluster Metadata @@ -109,4 +151,4 @@ Enable or disable allocation for persistent tasks: This setting does not affect the persistent tasks that are already being executed. Only newly created persistent tasks, or tasks that must be reassigned (after a node left the cluster, for example), are impacted by this setting. --- \ No newline at end of file +-- diff --git a/docs/reference/modules/indices.asciidoc b/docs/reference/modules/indices.asciidoc index 5f7bb7b9abae5..33ab7ecb4a814 100644 --- a/docs/reference/modules/indices.asciidoc +++ b/docs/reference/modules/indices.asciidoc @@ -30,6 +30,10 @@ Available settings include: Control the resource limits on the shard recovery process. +<>:: + + Control global search settings. 
+ include::indices/circuit_breaker.asciidoc[] include::indices/fielddata.asciidoc[] @@ -42,3 +46,5 @@ include::indices/request_cache.asciidoc[] include::indices/recovery.asciidoc[] +include::indices/search-settings.asciidoc[] + diff --git a/docs/reference/modules/indices/search-settings.asciidoc b/docs/reference/modules/indices/search-settings.asciidoc new file mode 100644 index 0000000000000..ad75de1291cdc --- /dev/null +++ b/docs/reference/modules/indices/search-settings.asciidoc @@ -0,0 +1,16 @@ +[[search-settings]] +=== Search Settings + +The following _expert_ setting can be set to manage global search limits. + +`indices.query.bool.max_clause_count`:: + Defaults to `1024`. + +This setting limits the number of clauses a Lucene BooleanQuery can have. The +default of 1024 is quite high and should normally be sufficient. This limit does +not only affect Elasticsearch's `bool` query, but many other queries are rewritten to Lucene's +BooleanQuery internally. The limit is in place to prevent searches from becoming too large +and taking up too much CPU and memory. If you consider increasing this setting, +make sure you have exhausted all other options to avoid having to do this. Higher values can lead +to performance degradations and memory issues, especially in clusters with a high load or +few resources. diff --git a/docs/reference/monitoring/configuring-metricbeat.asciidoc b/docs/reference/monitoring/configuring-metricbeat.asciidoc new file mode 100644 index 0000000000000..3d4a37861f1ae --- /dev/null +++ b/docs/reference/monitoring/configuring-metricbeat.asciidoc @@ -0,0 +1,195 @@ +[role="xpack"] +[testenv="gold"] +[[configuring-metricbeat]] +=== Monitoring {es} with {metricbeat} + +beta[] In 6.5 and later, you can use {metricbeat} to collect data about {es} +and ship it to the monitoring cluster, rather than routing it through exporters +as described in <>. 
+ +image::monitoring/images/metricbeat.png[Example monitoring architecture] + +To learn about monitoring in general, see +{stack-ov}/xpack-monitoring.html[Monitoring the {stack}]. + +. Enable the collection of monitoring data. Set +`xpack.monitoring.collection.enabled` to `true` on the production cluster. + ++ +-- +For example, you can use the following APIs to review and change this setting: + +[source,js] +---------------------------------- +GET _cluster/settings + +PUT _cluster/settings +{ + "persistent": { + "xpack.monitoring.collection.enabled": true + } +} +---------------------------------- +// CONSOLE + +For more information, see <> and <>. +-- + +. Disable the default collection of {es} monitoring metrics. Set +`xpack.monitoring.elasticsearch.collection.enabled` to `false` on the production +cluster. + ++ +-- +For example, you can use the following API to change this setting: + +[source,js] +---------------------------------- +PUT _cluster/settings +{ + "persistent": { + "xpack.monitoring.elasticsearch.collection.enabled": false + } +} +---------------------------------- +// CONSOLE + +Leave `xpack.monitoring.enabled` set to its default value (`true`). +-- + +. On each {es} node in the production cluster: + +.. {metricbeat-ref}/metricbeat-installation.html[Install {metricbeat}]. + +.. Enable the {es} module in {metricbeat}. + ++ +-- +For example, to enable the default configuration in the `modules.d` directory, +run the following command: + +["source","sh",subs="attributes,callouts"] +---------------------------------------------------------------------- +metricbeat modules enable elasticsearch +---------------------------------------------------------------------- + +For more information, see +{metricbeat-ref}/configuration-metricbeat.html[Specify which modules to run] and +{metricbeat-ref}/metricbeat-module-elasticsearch.html[{es} module]. +-- + +.. Configure the {es} module in {metricbeat}. 
+ ++ +-- +You must specify the following settings in the `modules.d/elasticsearch.yml` file: + +[source,yaml] +---------------------------------- +- module: elasticsearch + metricsets: + - ccr + - cluster_stats + - index + - index_recovery + - index_summary + - ml_job + - node_stats + - shard + period: 10s + hosts: ["http://localhost:9200"] <1> + xpack.enabled: true +---------------------------------- +<1> This setting identifies the host and port number that are used to access {es}. +-- + +.. If {security} is enabled, you must also provide a user ID and password so that +{metricbeat} can collect metrics successfully. + +... Create or identify a user that you want to use to collect the metrics. ++ +-- +TIP: There is a `remote_monitoring_user` built-in user that grants the privileges +necessary for {metricbeat} to monitor {stack} products. See +{stack-ov}/built-in-users.html[Built-in users]. + +Alternatively, you can choose a different user and give them the +`remote_monitoring_collector` {stack-ov}/built-in-roles.html[built-in role]. +-- + +... Add the `username` and `password` settings to the {es} module configuration +file. ++ +-- +For example, add the following settings in the `modules.d/elasticsearch.yml` file: + +[source,yaml] +---------------------------------- +- module: elasticsearch + ... + username: remote_monitoring_user + password: YOUR_PASSWORD +---------------------------------- +-- + +.. If you configured {es} to use <>, +you must access it via HTTPS. For example, use a `hosts` setting like +`https://localhost:9200` in the `modules.d/elasticsearch.yml` file. + +.. Identify where to send the monitoring data. + ++ +-- +TIP: In production environments, we strongly recommend using a separate cluster +(referred to as the _monitoring cluster_) to store the data. Using a separate +monitoring cluster prevents production cluster outages from impacting your +ability to access your monitoring data. 
It also prevents monitoring activities +from impacting the performance of your production cluster. + +For example, specify the {es} output information in the {metricbeat} +configuration file (`metricbeat.yml`): + +[source,yaml] +---------------------------------- +output.elasticsearch: + hosts: ["http://es-mon-1:9200", "http://es-mon-2:9200"] <1> +---------------------------------- +<1> In this example, the data is stored on a monitoring cluster with nodes +`es-mon-1` and `es-mon-2`. + +For more information about these configuration options, see +{metricbeat-ref}/elasticsearch-output.html[Configure the {es} output]. +-- + +.. If {security} is enabled on the monitoring cluster, you must provide a valid +user ID and password so that {metricbeat} can send metrics successfully. + +... Create or identify a user that you want to use to send the metrics. ++ +-- +TIP: There is a `remote_monitoring_user` built-in user that grants the privileges +necessary for {metricbeat} to monitor {stack} products. See +{stack-ov}/built-in-users.html[Built-in users]. + +Alternatively, you can choose a different user and give them the +`remote_monitoring_agent` {stack-ov}/built-in-roles.html[built-in role]. +-- + +... Add the `username` and `password` settings to the {es} output information in +the {metricbeat} configuration file (`metricbeat.yml`): ++ +-- +[source,yaml] +---------------------------------- +output.elasticsearch: + ... + username: remote_monitoring_user + password: YOUR_PASSWORD +---------------------------------- +-- + +.. If you configured the monitoring cluster to use +<>, you must access it via +HTTPS. For example, use a `hosts` setting like `https://es-mon-1:9200` in the +`metricbeat.yml` file. + +. <>. + +. {metricbeat-ref}/metricbeat-starting.html[Start {metricbeat}]. + +. {kibana-ref}/monitoring-data.html[View the monitoring data in {kib}]. 
diff --git a/docs/reference/monitoring/configuring-monitoring.asciidoc b/docs/reference/monitoring/configuring-monitoring.asciidoc index 6708b791036a9..81a9cce4f12ec 100644 --- a/docs/reference/monitoring/configuring-monitoring.asciidoc +++ b/docs/reference/monitoring/configuring-monitoring.asciidoc @@ -6,18 +6,27 @@ Configuring monitoring ++++ -By default, {monitoring} is enabled but data collection is disabled. Advanced -monitoring settings enable you to control how frequently data is collected, -configure timeouts, and set the retention period for locally-stored monitoring -indices. You can also adjust how monitoring data is displayed. +If you enable the collection of monitoring data in your cluster, you can +optionally collect metrics about {es}. By default, {monitoring} is enabled but +data collection is disabled. + +The following method involves sending the metrics to the monitoring cluster by +using exporters. For an alternative method, see <>. + +Advanced monitoring settings enable you to control how frequently data is +collected, configure timeouts, and set the retention period for locally-stored +monitoring indices. You can also adjust how monitoring data is displayed. + +To learn about monitoring in general, see +{stack-ov}/xpack-monitoring.html[Monitoring the {stack}]. . To collect monitoring data about your {es} cluster: .. Verify that the `xpack.monitoring.enabled`, `xpack.monitoring.collection.enabled`, and `xpack.monitoring.elasticsearch.collection.enabled` settings are `true` on each -node in the cluster. By default xpack.monitoring.collection.enabled is disabled -(`false`), and that overrides xpack.monitoring.elasticsearch.collection.enabled, +node in the cluster. By default `xpack.monitoring.collection.enabled` is disabled +(`false`), and that overrides `xpack.monitoring.elasticsearch.collection.enabled`, which defaults to being enabled (`true`). Both settings can be set dynamically at runtime. For more information, see <>. 
@@ -69,8 +78,9 @@ see {stack-ov}/how-monitoring-works.html[How Monitoring Works]. a dedicated monitoring cluster: .. Create a user on the monitoring cluster that has the -{xpack-ref}/built-in-roles.html#built-in-roles-remote-monitoring-agent[`remote_monitoring_agent` built-in role]. For example, the following request -creates a `remote_monitor` user that has the `remote_monitoring_agent` role: +{stack-ov}/built-in-roles.html#built-in-roles-remote-monitoring-agent[`remote_monitoring_agent` built-in role]. +For example, the following request creates a `remote_monitor` user that has the +`remote_monitoring_agent` role: + -- [source, sh] @@ -87,12 +97,17 @@ POST /_xpack/security/user/remote_monitor -- .. On each node in the cluster that is being monitored, configure the `http` -exporter to use the appropriate credentials when data is shipped to the monitoring cluster. +exporter to use the appropriate credentials when data is shipped to the +monitoring cluster. + -- -If SSL/TLS is enabled on the monitoring cluster, you must use the HTTPS protocol in the `host` setting. You must also include the CA certificate in each node's trusted certificates in order to verify the identities of the nodes in the monitoring cluster. +If SSL/TLS is enabled on the monitoring cluster, you must use the HTTPS protocol +in the `host` setting. You must also include the CA certificate in each node's +trusted certificates in order to verify the identities of the nodes in the +monitoring cluster. -The following example specifies the location of the PEM encoded certificate with the `certificate_authorities` setting: +The following example specifies the location of the PEM encoded certificate with +the `certificate_authorities` setting: [source,yaml] -------------------------------------------------- @@ -144,5 +159,8 @@ stored, that is to say the monitoring cluster. To grant all of the necessary per . Optional: <>. +. {kibana-ref}/monitoring-data.html[View the monitoring data in {kib}]. 
+ +include::configuring-metricbeat.asciidoc[] include::indices.asciidoc[] include::{es-repo-dir}/settings/monitoring-settings.asciidoc[] \ No newline at end of file diff --git a/docs/reference/monitoring/images/metricbeat.png b/docs/reference/monitoring/images/metricbeat.png new file mode 100644 index 0000000000000..bf6434dc4b40c Binary files /dev/null and b/docs/reference/monitoring/images/metricbeat.png differ diff --git a/docs/reference/monitoring/indices.asciidoc b/docs/reference/monitoring/indices.asciidoc index 658ac389ae80a..34cbced1c4332 100644 --- a/docs/reference/monitoring/indices.asciidoc +++ b/docs/reference/monitoring/indices.asciidoc @@ -1,7 +1,7 @@ [role="xpack"] [testenv="basic"] [[config-monitoring-indices]] -=== Configuring Indices for Monitoring +=== Configuring indices for monitoring <> are used to configure the indices that store the monitoring data collected from a cluster. diff --git a/docs/reference/rollup/apis/get-job.asciidoc b/docs/reference/rollup/apis/get-job.asciidoc index 8ba126a8846bc..794d72480121b 100644 --- a/docs/reference/rollup/apis/get-job.asciidoc +++ b/docs/reference/rollup/apis/get-job.asciidoc @@ -88,13 +88,6 @@ Which will yield the following response: "metrics" : [ "avg" ] - }, - { - "field": "timestamp", - "metrics": [ - "max", - "min" - ] } ], "timeout" : "20s", @@ -215,13 +208,6 @@ Which will yield the following response: "metrics" : [ "avg" ] - }, - { - "field": "timestamp", - "metrics": [ - "min", - "max" - ] } ], "timeout" : "20s", @@ -271,13 +257,6 @@ Which will yield the following response: "metrics" : [ "avg" ] - }, - { - "field": "timestamp", - "metrics": [ - "min", - "max" - ] } ], "timeout" : "20s", diff --git a/docs/reference/rollup/apis/put-job.asciidoc b/docs/reference/rollup/apis/put-job.asciidoc index 55568933d89d2..79e30ae8dc99b 100644 --- a/docs/reference/rollup/apis/put-job.asciidoc +++ b/docs/reference/rollup/apis/put-job.asciidoc @@ -68,7 +68,7 @@ PUT _xpack/rollup/job/sensor "groups" : { 
"date_histogram": { "field": "timestamp", - "interval": "60m", + "interval": "1h", "delay": "7d" }, "terms": { @@ -98,84 +98,4 @@ When the job is created, you receive the following results: "acknowledged": true } ---- -// TESTRESPONSE - -By default the metrics `min`/`max` are added -for the fields in the `date_histogram` and `histogram` configurations. -If this behavior is not desired, explicitly configure metrics -for those fields. This will override the defaults. - -If the following is provided - -[source,js] --------------------------------------------------- -PUT _xpack/rollup/job/sensor2 -{ - "index_pattern": "sensor-*", - "rollup_index": "sensor_rollup", - "cron": "*/30 * * * * ?", - "page_size" :1000, - "groups" : { - "date_histogram": { - "field": "timestamp", - "interval": "60m", - "delay": "7d" - }, - "histogram": { - "fields": ["voltage", "temperature"], - "interval": 5 - } - }, - "metrics": [ - { - "field": "temperature", - "metrics": ["min", "max", "sum"] - } - ] -} --------------------------------------------------- -// NOTCONSOLE -// TEST[setup:sensor_index] - -The actual config when created in the cluster will look as follows. - -[source,js] --------------------------------------------------- -{ - "index_pattern": "sensor-*", - "rollup_index": "sensor_rollup", - "cron": "*/30 * * * * ?", - "page_size" :1000, - "groups" : { - "date_histogram": { - "field": "timestamp", - "interval": "60m", - "delay": "7d" - }, - "histogram": { - "fields": ["voltage", "temperature"], - "interval": 5 - } - }, - "metrics": [ - { - "field": "temperature", - "metrics": ["min", "max", "sum"] - }, - { - "field": "voltage", <1> - "metrics": ["min", "max"] - }, - { - "field": "timestamp", <1> - "metrics": ["min", "max"] - } - ] -} --------------------------------------------------- -// NOTCONSOLE -<1> Notice the new default metrics gathered for `voltage` and `timestamp`. 
- Since these fields were referenced in `groups.histogram` and - `groups.date_histogram` configurations - respectively and no metrics were requested for them, - they both got the default metrics of `["min", "max"]`. +// TESTRESPONSE \ No newline at end of file diff --git a/docs/reference/rollup/apis/rollup-caps.asciidoc b/docs/reference/rollup/apis/rollup-caps.asciidoc index 6679c4c5f06db..274037cae8f2f 100644 --- a/docs/reference/rollup/apis/rollup-caps.asciidoc +++ b/docs/reference/rollup/apis/rollup-caps.asciidoc @@ -124,12 +124,6 @@ Which will yield the following response: "time_zone" : "UTC", "interval" : "1h", "delay": "7d" - }, - { - "agg": "max" - }, - { - "agg": "min" } ], "voltage" : [ diff --git a/docs/reference/rollup/apis/rollup-index-caps.asciidoc b/docs/reference/rollup/apis/rollup-index-caps.asciidoc index 64c5c5ac784f2..df314fb458b5c 100644 --- a/docs/reference/rollup/apis/rollup-index-caps.asciidoc +++ b/docs/reference/rollup/apis/rollup-index-caps.asciidoc @@ -120,12 +120,6 @@ This will yield the following response: "time_zone" : "UTC", "interval" : "1h", "delay": "7d" - }, - { - "agg" : "max" - }, - { - "agg" : "min" } ], "voltage" : [ diff --git a/docs/reference/search/request/scroll.asciidoc b/docs/reference/search/request/scroll.asciidoc index c6a571f4b8fc8..4b96fe0e70678 100644 --- a/docs/reference/search/request/scroll.asciidoc +++ b/docs/reference/search/request/scroll.asciidoc @@ -109,7 +109,9 @@ request) tells Elasticsearch how long it should keep the search context alive. Its value (e.g. `1m`, see <>) does not need to be long enough to process all data -- it just needs to be long enough to process the previous batch of results. Each `scroll` request (with the `scroll` parameter) sets a -new expiry time. +new expiry time. If a `scroll` request doesn't pass in the `scroll` +parameter, then the search context will be freed as part of _that_ `scroll` +request. 
Normally, the background merge process optimizes the index by merging together smaller segments to create new bigger segments, at diff --git a/docs/reference/settings/monitoring-settings.asciidoc b/docs/reference/settings/monitoring-settings.asciidoc index a039084412cda..92c51772720c9 100644 --- a/docs/reference/settings/monitoring-settings.asciidoc +++ b/docs/reference/settings/monitoring-settings.asciidoc @@ -1,8 +1,8 @@ [role="xpack"] [[monitoring-settings]] -=== Monitoring Settings in Elasticsearch +=== Monitoring settings in Elasticsearch ++++ -Monitoring Settings +Monitoring settings ++++ By default, monitoring is enabled but data collection is disabled. To enable @@ -43,17 +43,14 @@ to `true`. Its default value is `false`. The `xpack.monitoring.collection` settings control how data is collected from your Elasticsearch nodes. -`xpack.monitoring.collection.enabled`:: +`xpack.monitoring.collection.enabled`:: (<>) added[6.3.0] Set to `true` to enable the collection of monitoring data. When this setting is `false` (default), {es} monitoring data is not collected and all monitoring data from other sources such as {kib}, Beats, and Logstash is ignored. -+ -You can update this setting through the -<>. -`xpack.monitoring.collection.interval`:: +`xpack.monitoring.collection.interval`:: (<>) Setting to `-1` to disable data collection is no longer supported beginning with 7.0.0. deprecated[6.3.0, Use `xpack.monitoring.collection.enabled` set to @@ -62,35 +59,26 @@ Setting to `-1` to disable data collection is no longer supported beginning with Controls how often data samples are collected. Defaults to `10s`. If you modify the collection interval, set the `xpack.monitoring.min_interval_seconds` option in `kibana.yml` to the same value. -+ -You can update this setting through the -<>. -`xpack.monitoring.elasticsearch.collection.enabled`:: +`xpack.monitoring.elasticsearch.collection.enabled`:: (<>) Controls whether statistics about your {es} cluster should be collected. 
Defaults to `true`. This is different from xpack.monitoring.collection.enabled, which allows you to enable or disable all monitoring collection. However, this setting simply disables the collection of Elasticsearch data while still allowing other data (e.g., Kibana, Logstash, Beats, or APM Server monitoring data) to pass through this cluster. -+ -You can update this setting through the -<>. `xpack.monitoring.collection.cluster.stats.timeout`:: Sets the timeout for collecting the cluster statistics. Defaults to `10s`. -`xpack.monitoring.collection.indices`:: +`xpack.monitoring.collection.indices`:: (<>) Controls which indices Monitoring collects data from. Defaults to all indices. Specify the index names as a comma-separated list, for example `test1,test2,test3`. Names can include wildcards, for example `test*`. You can explicitly include or exclude indices by prepending `+` to include the index, or `-` to exclude the index. For example, to include all indices that start with `test` except `test3`, you could specify `+test*,-test3`. -+ -You can update this setting through the -<>. 
`xpack.monitoring.collection.index.stats.timeout`:: diff --git a/libs/core/src/test/java/org/elasticsearch/common/CharArraysTests.java b/libs/core/src/test/java/org/elasticsearch/common/CharArraysTests.java index 9283283ab0861..368886c7fd3d6 100644 --- a/libs/core/src/test/java/org/elasticsearch/common/CharArraysTests.java +++ b/libs/core/src/test/java/org/elasticsearch/common/CharArraysTests.java @@ -47,7 +47,8 @@ public void testCharsBeginsWith() { assertFalse(CharArrays.charsBeginsWith(randomAlphaOfLength(4), null)); assertFalse(CharArrays.charsBeginsWith(null, null)); assertFalse(CharArrays.charsBeginsWith(null, randomAlphaOfLength(4).toCharArray())); - assertFalse(CharArrays.charsBeginsWith(randomAlphaOfLength(2), randomAlphaOfLengthBetween(3, 8).toCharArray())); + final String undesiredPrefix = randomAlphaOfLength(2); + assertFalse(CharArrays.charsBeginsWith(undesiredPrefix, randomAlphaOfLengthNotBeginningWith(undesiredPrefix, 3, 8))); final String prefix = randomAlphaOfLengthBetween(2, 4); assertTrue(CharArrays.charsBeginsWith(prefix, prefix.toCharArray())); @@ -72,4 +73,12 @@ public void testConstantTimeEquals() { assertFalse(CharArrays.constantTimeEquals(value, other)); assertFalse(CharArrays.constantTimeEquals(value.toCharArray(), other.toCharArray())); } + + private char[] randomAlphaOfLengthNotBeginningWith(String undesiredPrefix, int min, int max) { + char[] nonMatchingValue; + do { + nonMatchingValue = randomAlphaOfLengthBetween(min, max).toCharArray(); + } while (new String(nonMatchingValue).startsWith(undesiredPrefix)); + return nonMatchingValue; + } } diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java index d0cc929b56d24..219c3c5bbbae4 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java @@ -324,9 
+324,31 @@ private void parseSub(XContentParser parser, FieldParser fieldParser, String cur switch (token) { case START_OBJECT: parseValue(parser, fieldParser, currentFieldName, value, context); + /* + * Well behaving parsers should consume the entire object but + * asserting that they do that is not something we can do + * efficiently here. Instead we can check that they end on an + * END_OBJECT. They could end on the *wrong* end object and + * this test won't catch them, but that is the price that we pay + * for having a cheap test. + */ + if (parser.currentToken() != XContentParser.Token.END_OBJECT) { + throw new IllegalStateException("parser for [" + currentFieldName + "] did not end on END_OBJECT"); + } break; case START_ARRAY: parseArray(parser, fieldParser, currentFieldName, value, context); + /* + * Well behaving parsers should consume the entire array but + * asserting that they do that is not something we can do + * efficiently here. Instead we can check that they end on an + * END_ARRAY. They could end on the *wrong* end array and + * this test won't catch them, but that is the price that we pay + * for having a cheap test. 
+ */ + if (parser.currentToken() != XContentParser.Token.END_ARRAY) { + throw new IllegalStateException("parser for [" + currentFieldName + "] did not end on END_ARRAY"); + } break; case END_OBJECT: case END_ARRAY: diff --git a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java index 42d53bf49859b..889f1619614aa 100644 --- a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java @@ -34,6 +34,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasSize; @@ -650,6 +651,49 @@ public void setArray(List testArray) { assertThat(ex.getMessage(), containsString("[foo] failed to parse field [int_array]")); } + public void testNoopDeclareObject() throws IOException { + ObjectParser, Void> parser = new ObjectParser<>("noopy", AtomicReference::new); + parser.declareString(AtomicReference::set, new ParseField("body")); + parser.declareObject((a,b) -> {}, (p, c) -> null, new ParseField("noop")); + + assertEquals("i", parser.parse(createParser(JsonXContent.jsonXContent, "{\"body\": \"i\"}"), null).get()); + Exception garbageException = expectThrows(IllegalStateException.class, () -> parser.parse( + createParser(JsonXContent.jsonXContent, "{\"noop\": {\"garbage\": \"shouldn't\"}}"), + null)); + assertEquals("parser for [noop] did not end on END_OBJECT", garbageException.getMessage()); + Exception sneakyException = expectThrows(IllegalStateException.class, () -> parser.parse( + createParser(JsonXContent.jsonXContent, "{\"noop\": {\"body\": \"shouldn't\"}}"), + null)); + assertEquals("parser for [noop] did not end on END_OBJECT", sneakyException.getMessage()); + } + + 
public void testNoopDeclareField() throws IOException { + ObjectParser, Void> parser = new ObjectParser<>("noopy", AtomicReference::new); + parser.declareString(AtomicReference::set, new ParseField("body")); + parser.declareField((a,b) -> {}, (p, c) -> null, new ParseField("noop"), ValueType.STRING_ARRAY); + + assertEquals("i", parser.parse(createParser(JsonXContent.jsonXContent, "{\"body\": \"i\"}"), null).get()); + Exception e = expectThrows(IllegalStateException.class, () -> parser.parse( + createParser(JsonXContent.jsonXContent, "{\"noop\": [\"ignored\"]}"), + null)); + assertEquals("parser for [noop] did not end on END_ARRAY", e.getMessage()); + } + + public void testNoopDeclareObjectArray() throws IOException { + ObjectParser, Void> parser = new ObjectParser<>("noopy", AtomicReference::new); + parser.declareString(AtomicReference::set, new ParseField("body")); + parser.declareObjectArray((a,b) -> {}, (p, c) -> null, new ParseField("noop")); + + XContentParseException garbageError = expectThrows(XContentParseException.class, () -> parser.parse( + createParser(JsonXContent.jsonXContent, "{\"noop\": [{\"garbage\": \"shouldn't\"}}]"), + null)); + assertEquals("expected value but got [FIELD_NAME]", garbageError.getCause().getMessage()); + XContentParseException sneakyError = expectThrows(XContentParseException.class, () -> parser.parse( + createParser(JsonXContent.jsonXContent, "{\"noop\": [{\"body\": \"shouldn't\"}}]"), + null)); + assertEquals("expected value but got [FIELD_NAME]", sneakyError.getCause().getMessage()); + } + static class NamedObjectHolder { public static final ObjectParser PARSER = new ObjectParser<>("named_object_holder", NamedObjectHolder::new); diff --git a/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/10_basic.yml b/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/10_basic.yml index cde34dfa10760..2416d2b2b3141 100644 --- 
a/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/10_basic.yml +++ b/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/10_basic.yml @@ -1,6 +1,9 @@ # Integration tests for Matrix Aggs Plugin # "Matrix stats aggs loaded": + - skip: + reason: "contains is a newly added assertion" + features: contains - do: cluster.state: {} diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/10_basic.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/10_basic.yml index b9b905639fd70..ca6cd2e953be4 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/10_basic.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/10_basic.yml @@ -1,4 +1,7 @@ "Module loaded": + - skip: + reason: "contains is a newly added assertion" + features: contains - do: cluster.state: {} diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yml b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yml index eb23b7840ee6a..f83a9e78cb3fe 100644 --- a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yml +++ b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yml @@ -1,4 +1,7 @@ "Ingest common installed": + - skip: + reason: "contains is a newly added assertion" + features: contains - do: cluster.state: {} diff --git a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionSearchScript.java b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScoreScript.java similarity index 59% rename from modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionSearchScript.java rename to modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScoreScript.java index 7a251f6e6fd29..120e8a9cabf7a 100644 --- 
a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionSearchScript.java +++ b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScoreScript.java @@ -26,27 +26,23 @@ import org.apache.lucene.search.DoubleValues; import org.apache.lucene.search.DoubleValuesSource; import org.elasticsearch.script.GeneralScriptException; -import org.elasticsearch.script.SearchScript; +import org.elasticsearch.script.ScoreScript; import java.io.IOException; /** * A bridge to evaluate an {@link Expression} against {@link Bindings} in the context - * of a {@link SearchScript}. + * of a {@link ScoreScript}. */ -class ExpressionSearchScript implements SearchScript.LeafFactory { +class ExpressionScoreScript implements ScoreScript.LeafFactory { - final Expression exprScript; - final SimpleBindings bindings; - final DoubleValuesSource source; - final ReplaceableConstDoubleValueSource specialValue; // _value - final boolean needsScores; + private final Expression exprScript; + private final DoubleValuesSource source; + private final boolean needsScores; - ExpressionSearchScript(Expression e, SimpleBindings b, ReplaceableConstDoubleValueSource v, boolean needsScores) { - exprScript = e; - bindings = b; - source = exprScript.getDoubleValuesSource(bindings); - specialValue = v; + ExpressionScoreScript(Expression e, SimpleBindings b, boolean needsScores) { + this.exprScript = e; + this.source = exprScript.getDoubleValuesSource(b); this.needsScores = needsScores; } @@ -55,15 +51,14 @@ public boolean needs_score() { return needsScores; } - @Override - public SearchScript newInstance(final LeafReaderContext leaf) throws IOException { - return new SearchScript(null, null, null) { + public ScoreScript newInstance(final LeafReaderContext leaf) throws IOException { + return new ScoreScript(null, null, null) { // Fake the scorer until setScorer is called. 
DoubleValues values = source.getValues(leaf, new DoubleValues() { @Override public double doubleValue() throws IOException { - return getScore(); + return get_score(); } @Override @@ -73,10 +68,7 @@ public boolean advanceExact(int doc) throws IOException { }); @Override - public Object run() { return Double.valueOf(runAsDouble()); } - - @Override - public double runAsDouble() { + public double execute() { try { return values.doubleValue(); } catch (Exception exception) { @@ -92,24 +84,6 @@ public void setDocument(int d) { throw new IllegalStateException("Can't advance to doc using " + exprScript, e); } } - - @Override - public void setNextAggregationValue(Object value) { - // _value isn't used in script if specialValue == null - if (specialValue != null) { - if (value instanceof Number) { - specialValue.setValue(((Number)value).doubleValue()); - } else { - throw new GeneralScriptException("Cannot use expression with text variable using " + exprScript); - } - } - } - - @Override - public void setNextVar(String name, Object value) { - // other per-document variables aren't supported yet, even if they are numbers - // but we shouldn't encourage this anyway. 
- } }; } diff --git a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java index d719f7a2cbcd8..150bca60273e9 100644 --- a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java +++ b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java @@ -23,10 +23,8 @@ import org.apache.lucene.expressions.SimpleBindings; import org.apache.lucene.expressions.js.JavascriptCompiler; import org.apache.lucene.expressions.js.VariableContext; -import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.queries.function.ValueSource; import org.apache.lucene.queries.function.valuesource.DoubleConstValueSource; -import org.apache.lucene.search.Scorable; import org.apache.lucene.search.SortField; import org.elasticsearch.SpecialPermission; import org.elasticsearch.common.Nullable; @@ -48,11 +46,9 @@ import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptEngine; import org.elasticsearch.script.ScriptException; -import org.elasticsearch.script.SearchScript; import org.elasticsearch.script.TermsSetQueryScript; import org.elasticsearch.search.lookup.SearchLookup; -import java.io.IOException; import java.security.AccessControlContext; import java.security.AccessController; import java.security.PrivilegedAction; @@ -63,8 +59,9 @@ import java.util.Map; /** - * Provides the infrastructure for Lucene expressions as a scripting language for Elasticsearch. Only - * {@link SearchScript}s are supported. + * Provides the infrastructure for Lucene expressions as a scripting language for Elasticsearch. + * + * Only contexts returning numeric types or {@link Object} are supported. 
*/ public class ExpressionScriptEngine extends AbstractComponent implements ScriptEngine { @@ -111,10 +108,7 @@ protected Class loadClass(String name, boolean resolve) throws ClassNotFoundE } } }); - if (context.instanceClazz.equals(SearchScript.class)) { - SearchScript.Factory factory = (p, lookup) -> newSearchScript(expr, lookup, p); - return context.factoryClazz.cast(factory); - } else if (context.instanceClazz.equals(BucketAggregationScript.class)) { + if (context.instanceClazz.equals(BucketAggregationScript.class)) { return context.factoryClazz.cast(newBucketAggregationScriptFactory(expr)); } else if (context.instanceClazz.equals(BucketAggregationSelectorScript.class)) { BucketAggregationScript.Factory factory = newBucketAggregationScriptFactory(expr); @@ -178,40 +172,6 @@ public Double execute() { }; } - private SearchScript.LeafFactory newSearchScript(Expression expr, SearchLookup lookup, @Nullable Map vars) { - // NOTE: if we need to do anything complicated with bindings in the future, we can just extend Bindings, - // instead of complicating SimpleBindings (which should stay simple) - SimpleBindings bindings = new SimpleBindings(); - ReplaceableConstDoubleValueSource specialValue = null; - boolean needsScores = false; - for (String variable : expr.variables) { - try { - if (variable.equals("_score")) { - bindings.add(new SortField("_score", SortField.Type.SCORE)); - needsScores = true; - } else if (variable.equals("_value")) { - specialValue = new ReplaceableConstDoubleValueSource(); - bindings.add("_value", specialValue); - // noop: _value is special for aggregations, and is handled in ExpressionScriptBindings - // TODO: if some uses it in a scoring expression, they will get a nasty failure when evaluating...need a - // way to know this is for aggregations and so _value is ok to have... 
- } else if (vars != null && vars.containsKey(variable)) { - bindFromParams(vars, bindings, variable); - } else { - // delegate valuesource creation based on field's type - // there are three types of "fields" to expressions, and each one has a different "api" of variables and methods. - final ValueSource valueSource = getDocValueSource(variable, lookup); - needsScores |= valueSource.getSortField(false).needsScores(); - bindings.add(variable, valueSource.asDoubleValuesSource()); - } - } catch (Exception e) { - // we defer "binding" of variables until here: give context for that variable - throw convertToScriptException("link error", expr.sourceText, variable, e); - } - } - return new ExpressionSearchScript(expr, bindings, specialValue, needsScores); - } - private NumberSortScript.LeafFactory newSortScript(Expression expr, SearchLookup lookup, @Nullable Map vars) { // NOTE: if we need to do anything complicated with bindings in the future, we can just extend Bindings, // instead of complicating SimpleBindings (which should stay simple) @@ -315,13 +275,13 @@ private FieldScript.LeafFactory newFieldScript(Expression expr, SearchLookup loo * See https://github.com/elastic/elasticsearch/issues/26429. 
*/ private FilterScript.LeafFactory newFilterScript(Expression expr, SearchLookup lookup, @Nullable Map vars) { - SearchScript.LeafFactory searchLeafFactory = newSearchScript(expr, lookup, vars); + ScoreScript.LeafFactory searchLeafFactory = newScoreScript(expr, lookup, vars); return ctx -> { - SearchScript script = searchLeafFactory.newInstance(ctx); + ScoreScript script = searchLeafFactory.newInstance(ctx); return new FilterScript(vars, lookup, ctx) { @Override public boolean execute() { - return script.runAsDouble() != 0.0; + return script.execute() != 0.0; } @Override public void setDocument(int docid) { @@ -332,39 +292,37 @@ public void setDocument(int docid) { } private ScoreScript.LeafFactory newScoreScript(Expression expr, SearchLookup lookup, @Nullable Map vars) { - SearchScript.LeafFactory searchLeafFactory = newSearchScript(expr, lookup, vars); - return new ScoreScript.LeafFactory() { - @Override - public boolean needs_score() { - return searchLeafFactory.needs_score(); - } - - @Override - public ScoreScript newInstance(LeafReaderContext ctx) throws IOException { - SearchScript script = searchLeafFactory.newInstance(ctx); - return new ScoreScript(vars, lookup, ctx) { - @Override - public double execute() { - return script.runAsDouble(); - } - - @Override - public void setDocument(int docid) { - script.setDocument(docid); - } - - @Override - public void setScorer(Scorable scorer) { - script.setScorer(scorer); - } - - @Override - public double get_score() { - return script.getScore(); - } - }; + // NOTE: if we need to do anything complicated with bindings in the future, we can just extend Bindings, + // instead of complicating SimpleBindings (which should stay simple) + SimpleBindings bindings = new SimpleBindings(); + ReplaceableConstDoubleValueSource specialValue = null; + boolean needsScores = false; + for (String variable : expr.variables) { + try { + if (variable.equals("_score")) { + bindings.add(new SortField("_score", SortField.Type.SCORE)); + 
needsScores = true; + } else if (variable.equals("_value")) { + specialValue = new ReplaceableConstDoubleValueSource(); + bindings.add("_value", specialValue); + // noop: _value is special for aggregations, and is handled in ExpressionScriptBindings + // TODO: if some uses it in a scoring expression, they will get a nasty failure when evaluating...need a + // way to know this is for aggregations and so _value is ok to have... + } else if (vars != null && vars.containsKey(variable)) { + bindFromParams(vars, bindings, variable); + } else { + // delegate valuesource creation based on field's type + // there are three types of "fields" to expressions, and each one has a different "api" of variables and methods. + final ValueSource valueSource = getDocValueSource(variable, lookup); + needsScores |= valueSource.getSortField(false).needsScores(); + bindings.add(variable, valueSource.asDoubleValuesSource()); + } + } catch (Exception e) { + // we defer "binding" of variables until here: give context for that variable + throw convertToScriptException("link error", expr.sourceText, variable, e); } - }; + } + return new ExpressionScoreScript(expr, bindings, needsScores); } /** diff --git a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java index fdf9e09e07f14..7b1e53a336c1a 100644 --- a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java +++ b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java @@ -53,7 +53,7 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; -import static 
org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.bucketScript; +import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.bucketScript; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; diff --git a/modules/lang-expression/src/test/resources/rest-api-spec/test/lang_expression/10_basic.yml b/modules/lang-expression/src/test/resources/rest-api-spec/test/lang_expression/10_basic.yml index 0ca21cab93089..00ad6f890b0e0 100644 --- a/modules/lang-expression/src/test/resources/rest-api-spec/test/lang_expression/10_basic.yml +++ b/modules/lang-expression/src/test/resources/rest-api-spec/test/lang_expression/10_basic.yml @@ -1,6 +1,9 @@ # Integration tests for Expression scripts # "Expression loaded": + - skip: + reason: "contains is a newly added assertion" + features: contains - do: cluster.state: {} diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java index c3ec5ae1784a9..0463069609d4c 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java @@ -169,7 +169,7 @@ public void testBasic() throws Exception { MultiSearchTemplateResponse.Item response4 = response.getResponses()[3]; assertThat(response4.isFailure(), is(true)); assertThat(response4.getFailure(), instanceOf(IndexNotFoundException.class)); - assertThat(response4.getFailure().getMessage(), equalTo("no such index")); + assertThat(response4.getFailure().getMessage(), equalTo("no such index [unknown]")); MultiSearchTemplateResponse.Item response5 = response.getResponses()[4]; 
assertThat(response5.isFailure(), is(false)); diff --git a/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/10_basic.yml b/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/10_basic.yml index 1a014e9cceaa6..0e853d6273142 100644 --- a/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/10_basic.yml +++ b/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/10_basic.yml @@ -1,6 +1,9 @@ # Integration tests for Mustache scripts # "Mustache loaded": + - skip: + reason: "contains is a newly added assertion" + features: contains - do: cluster.state: {} diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java index c36625ad145be..b08b38d2bfc7b 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java @@ -66,13 +66,17 @@ public final class Whitelist { /** The {@link List} of all the whitelisted Painless class bindings. */ public final List whitelistClassBindings; + /** The {@link List} of all the whitelisted Painless instance bindings. */ + public final List whitelistInstanceBindings; + /** Standard constructor. All values must be not {@code null}. 
*/ - public Whitelist(ClassLoader classLoader, List whitelistClasses, - List whitelistImportedMethods, List whitelistClassBindings) { + public Whitelist(ClassLoader classLoader, List whitelistClasses, List whitelistImportedMethods, + List whitelistClassBindings, List whitelistInstanceBindings) { this.classLoader = Objects.requireNonNull(classLoader); this.whitelistClasses = Collections.unmodifiableList(Objects.requireNonNull(whitelistClasses)); this.whitelistImportedMethods = Collections.unmodifiableList(Objects.requireNonNull(whitelistImportedMethods)); this.whitelistClassBindings = Collections.unmodifiableList(Objects.requireNonNull(whitelistClassBindings)); + this.whitelistInstanceBindings = Collections.unmodifiableList(Objects.requireNonNull(whitelistInstanceBindings)); } } diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistClassBinding.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistClassBinding.java index f1e762b37c02f..da19917464820 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistClassBinding.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistClassBinding.java @@ -42,9 +42,7 @@ public class WhitelistClassBinding { /** The method name for this class binding. */ public final String methodName; - /** - * The canonical type name for the return type. - */ + /** The canonical type name for the return type. 
*/ public final String returnCanonicalTypeName; /** diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistInstanceBinding.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistInstanceBinding.java new file mode 100644 index 0000000000000..46c2f0f91fe02 --- /dev/null +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistInstanceBinding.java @@ -0,0 +1,61 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless.spi; + +import java.util.List; +import java.util.Objects; + +/** + * An instance binding represents a method call that stores state. Each instance binding must provide + * exactly one public method name. The canonical type name parameters provided must match those of the + * method. The method for an instance binding will target the specified Java instance. + */ +public class WhitelistInstanceBinding { + + /** Information about where this constructor was whitelisted from. */ + public final String origin; + + /** The Java instance this instance binding targets. */ + public final Object targetInstance; + + /** The method name for this class binding. 
*/ + public final String methodName; + + /** The canonical type name for the return type. */ + public final String returnCanonicalTypeName; + + /** + * A {@link List} of {@link String}s that are the Painless type names for the parameters of the + * constructor which can be used to look up the Java constructor through reflection. + */ + public final List canonicalTypeNameParameters; + + /** Standard constructor. All values must be not {@code null}. */ + public WhitelistInstanceBinding(String origin, Object targetInstance, + String methodName, String returnCanonicalTypeName, List canonicalTypeNameParameters) { + + this.origin = Objects.requireNonNull(origin); + this.targetInstance = Objects.requireNonNull(targetInstance); + + this.methodName = Objects.requireNonNull(methodName); + this.returnCanonicalTypeName = Objects.requireNonNull(returnCanonicalTypeName); + this.canonicalTypeNameParameters = Objects.requireNonNull(canonicalTypeNameParameters); + } +} diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java index 560010a35e9be..d896c345a47e9 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java @@ -29,6 +29,7 @@ import java.security.PrivilegedAction; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; /** Loads and creates a {@link Whitelist} from one to many text files. */ @@ -392,7 +393,7 @@ public static Whitelist loadFromResourceFiles(Class resource, String... 
filep ClassLoader loader = AccessController.doPrivileged((PrivilegedAction)resource::getClassLoader); - return new Whitelist(loader, whitelistClasses, whitelistStatics, whitelistClassBindings); + return new Whitelist(loader, whitelistClasses, whitelistStatics, whitelistClassBindings, Collections.emptyList()); } private WhitelistLoader() {} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java index 0fbdfa763eae3..81cc802916d4e 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java @@ -20,7 +20,6 @@ package org.elasticsearch.painless; import org.elasticsearch.bootstrap.BootstrapInfo; -import org.elasticsearch.painless.Locals.LocalMethod; import org.elasticsearch.painless.antlr.Walker; import org.elasticsearch.painless.lookup.PainlessLookup; import org.elasticsearch.painless.node.SSource; @@ -222,8 +221,8 @@ Constructor compile(Loader loader, MainMethodReserved reserved, String name, ScriptClassInfo scriptClassInfo = new ScriptClassInfo(painlessLookup, scriptClass); SSource root = Walker.buildPainlessTree(scriptClassInfo, reserved, name, source, settings, painlessLookup, null); - Map localMethods = root.analyze(painlessLookup); - root.write(); + root.analyze(painlessLookup); + Map statics = root.write(); try { Class clazz = loader.defineScript(CLASS_NAME, root.getBytes()); @@ -231,7 +230,10 @@ Constructor compile(Loader loader, MainMethodReserved reserved, String name, clazz.getField("$SOURCE").set(null, source); clazz.getField("$STATEMENTS").set(null, root.getStatements()); clazz.getField("$DEFINITION").set(null, painlessLookup); - clazz.getField("$LOCALS").set(null, localMethods); + + for (Map.Entry statik : statics.entrySet()) { + clazz.getField(statik.getKey()).set(null, statik.getValue()); + } return clazz.getConstructors()[0]; } 
catch (Exception exception) { // Catch everything to let the user know this is something caused internally. diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Globals.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Globals.java index d18cf2780cf3c..a6a15b8ce1e67 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Globals.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Globals.java @@ -31,7 +31,8 @@ public class Globals { private final Map syntheticMethods = new HashMap<>(); private final Map constantInitializers = new HashMap<>(); - private final Map> bindings = new HashMap<>(); + private final Map> classBindings = new HashMap<>(); + private final Map instanceBindings = new HashMap<>(); private final BitSet statements; /** Create a new Globals from the set of statement boundaries */ @@ -56,14 +57,19 @@ public void addConstantInitializer(Constant constant) { } } - /** Adds a new binding to be written as a local variable */ - public String addBinding(Class type) { - String name = "$binding$" + bindings.size(); - bindings.put(name, type); + /** Adds a new class binding to be written as a local variable */ + public String addClassBinding(Class type) { + String name = "$class_binding$" + classBindings.size(); + classBindings.put(name, type); return name; } + /** Adds a new binding to be written as a local variable */ + public String addInstanceBinding(Object instance) { + return instanceBindings.computeIfAbsent(instance, key -> "$instance_binding$" + instanceBindings.size()); + } + /** Returns the current synthetic methods */ public Map getSyntheticMethods() { return syntheticMethods; @@ -75,8 +81,13 @@ public Map getConstantInitializers() { } /** Returns the current bindings */ - public Map> getBindings() { - return bindings; + public Map> getClassBindings() { + return classBindings; + } + + /** Returns the current bindings */ + public Map getInstanceBindings() { + return 
instanceBindings; } /** Returns the set of statement boundaries */ diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java index 3057378646730..1773b3445c429 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java @@ -40,7 +40,7 @@ import org.elasticsearch.rest.RestHandler; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptEngine; -import org.elasticsearch.search.aggregations.pipeline.movfn.MovingFunctionScript; +import org.elasticsearch.search.aggregations.pipeline.MovingFunctionScript; import java.util.ArrayList; import java.util.Arrays; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java index 5ed305751c8e8..4e7ffbfb8d026 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java @@ -19,7 +19,6 @@ package org.elasticsearch.painless; -import org.apache.lucene.index.LeafReaderContext; import org.elasticsearch.SpecialPermission; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; @@ -29,7 +28,6 @@ import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptEngine; import org.elasticsearch.script.ScriptException; -import org.elasticsearch.script.SearchScript; import org.objectweb.asm.ClassWriter; import org.objectweb.asm.Opcodes; import org.objectweb.asm.Type; @@ -37,7 +35,6 @@ import java.lang.invoke.MethodType; import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; import 
java.lang.reflect.Method; import java.security.AccessControlContext; import java.security.AccessController; @@ -101,13 +98,8 @@ public PainlessScriptEngine(Settings settings, Map, List, List> entry : contexts.entrySet()) { ScriptContext context = entry.getKey(); - if (context.instanceClazz.equals(SearchScript.class)) { - contextsToCompilers.put(context, new Compiler(GenericElasticsearchScript.class, null, null, - PainlessLookupBuilder.buildFromWhitelists(entry.getValue()))); - } else { - contextsToCompilers.put(context, new Compiler(context.instanceClazz, context.factoryClazz, context.statefulFactoryClazz, - PainlessLookupBuilder.buildFromWhitelists(entry.getValue()))); - } + contextsToCompilers.put(context, new Compiler(context.instanceClazz, context.factoryClazz, context.statefulFactoryClazz, + PainlessLookupBuilder.buildFromWhitelists(entry.getValue()))); } this.contextsToCompilers = Collections.unmodifiableMap(contextsToCompilers); @@ -126,54 +118,24 @@ public String getType() { public T compile(String scriptName, String scriptSource, ScriptContext context, Map params) { Compiler compiler = contextsToCompilers.get(context); - if (context.instanceClazz.equals(SearchScript.class)) { - Constructor constructor = compile(compiler, scriptName, scriptSource, params); - boolean needsScore; + // Check we ourselves are not being called by unprivileged code. + SpecialPermission.check(); - try { - GenericElasticsearchScript newInstance = (GenericElasticsearchScript)constructor.newInstance(); - needsScore = newInstance.needs_score(); - } catch (InstantiationException | IllegalAccessException | InvocationTargetException e) { - throw new IllegalArgumentException("internal error"); + // Create our loader (which loads compiled code with no permissions). 
+ final Loader loader = AccessController.doPrivileged(new PrivilegedAction() { + @Override + public Loader run() { + return compiler.createLoader(getClass().getClassLoader()); } + }); - SearchScript.Factory factory = (p, lookup) -> new SearchScript.LeafFactory() { - @Override - public SearchScript newInstance(final LeafReaderContext context) { - try { - // a new instance is required for the class bindings model to work correctly - GenericElasticsearchScript newInstance = (GenericElasticsearchScript)constructor.newInstance(); - return new ScriptImpl(newInstance, p, lookup, context); - } catch (InstantiationException | IllegalAccessException | InvocationTargetException e) { - throw new IllegalArgumentException("internal error"); - } - } - @Override - public boolean needs_score() { - return needsScore; - } - }; - return context.factoryClazz.cast(factory); - } else { - // Check we ourselves are not being called by unprivileged code. - SpecialPermission.check(); - - // Create our loader (which loads compiled code with no permissions). 
- final Loader loader = AccessController.doPrivileged(new PrivilegedAction() { - @Override - public Loader run() { - return compiler.createLoader(getClass().getClassLoader()); - } - }); - - MainMethodReserved reserved = new MainMethodReserved(); - compile(contextsToCompilers.get(context), loader, reserved, scriptName, scriptSource, params); + MainMethodReserved reserved = new MainMethodReserved(); + compile(contextsToCompilers.get(context), loader, reserved, scriptName, scriptSource, params); - if (context.statefulFactoryClazz != null) { - return generateFactory(loader, context, reserved, generateStatefulFactory(loader, context, reserved)); - } else { - return generateFactory(loader, context, reserved, WriterConstants.CLASS_TYPE); - } + if (context.statefulFactoryClazz != null) { + return generateFactory(loader, context, reserved, generateStatefulFactory(loader, context, reserved)); + } else { + return generateFactory(loader, context, reserved, WriterConstants.CLASS_TYPE); } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptImpl.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptImpl.java deleted file mode 100644 index c16c3b1bb8a74..0000000000000 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptImpl.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.painless; - -import org.apache.lucene.index.LeafReaderContext; -import org.elasticsearch.script.SearchScript; -import org.elasticsearch.search.lookup.LeafSearchLookup; -import org.elasticsearch.search.lookup.SearchLookup; - -import java.util.HashMap; -import java.util.Map; -import java.util.function.DoubleSupplier; -import java.util.function.Function; - -/** - * ScriptImpl can be used as a {@link SearchScript} - * to run a previously compiled Painless script. - */ -final class ScriptImpl extends SearchScript { - - /** - * The Painless script that can be run. - */ - private final GenericElasticsearchScript script; - - /** - * A map that can be used to access input parameters at run-time. - */ - private final Map variables; - - /** - * Looks up the {@code _score} from {@link #scorer} if {@code _score} is used, otherwise returns {@code 0.0}. - */ - private final DoubleSupplier scoreLookup; - - /** - * Looks up the {@code ctx} from the {@link #variables} if {@code ctx} is used, otherwise return {@code null}. - */ - private final Function, Map> ctxLookup; - - /** - * Current _value for aggregation - * @see #setNextAggregationValue(Object) - */ - private Object aggregationValue; - - /** - * Creates a ScriptImpl for the a previously compiled Painless script. - * @param script The previously compiled Painless script. - * @param vars The initial variables to run the script with. - * @param lookup The lookup to allow search fields to be available if this is run as a search script. 
- */ - ScriptImpl(GenericElasticsearchScript script, Map vars, SearchLookup lookup, LeafReaderContext leafContext) { - super(null, lookup, leafContext); - this.script = script; - this.variables = new HashMap<>(); - - if (vars != null) { - variables.putAll(vars); - } - LeafSearchLookup leafLookup = getLeafLookup(); - if (leafLookup != null) { - variables.putAll(leafLookup.asMap()); - } - - scoreLookup = script.needs_score() ? this::getScore : () -> 0.0; - ctxLookup = script.needsCtx() ? variables -> (Map) variables.get("ctx") : variables -> null; - } - - @Override - public Map getParams() { - return variables; - } - - @Override - public void setNextVar(final String name, final Object value) { - variables.put(name, value); - } - - @Override - public void setNextAggregationValue(Object value) { - this.aggregationValue = value; - } - - @Override - public Object run() { - return script.execute(variables, scoreLookup.getAsDouble(), getDoc(), aggregationValue, ctxLookup.apply(variables)); - } - - @Override - public double runAsDouble() { - return ((Number)run()).doubleValue(); - } - -} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClassBinding.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClassBinding.java index 0f28830b3d4ab..aedbc936bb1d4 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClassBinding.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClassBinding.java @@ -60,7 +60,6 @@ public boolean equals(Object object) { @Override public int hashCode() { - return Objects.hash(javaConstructor, javaMethod, returnType, typeParameters); } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessInstanceBinding.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessInstanceBinding.java new file mode 100644 index 0000000000000..6952a3f05fb64 --- 
/dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessInstanceBinding.java @@ -0,0 +1,64 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless.lookup; + +import java.lang.reflect.Method; +import java.util.List; +import java.util.Objects; + +public class PainlessInstanceBinding { + + public final Object targetInstance; + public final Method javaMethod; + + public final Class returnType; + public final List> typeParameters; + + PainlessInstanceBinding(Object targetInstance, Method javaMethod, Class returnType, List> typeParameters) { + this.targetInstance = targetInstance; + this.javaMethod = javaMethod; + + this.returnType = returnType; + this.typeParameters = typeParameters; + } + + @Override + public boolean equals(Object object) { + if (this == object) { + return true; + } + + if (object == null || getClass() != object.getClass()) { + return false; + } + + PainlessInstanceBinding that = (PainlessInstanceBinding)object; + + return targetInstance == that.targetInstance && + Objects.equals(javaMethod, that.javaMethod) && + Objects.equals(returnType, that.returnType) && + Objects.equals(typeParameters, that.typeParameters); + } + + @Override + public 
int hashCode() { + return Objects.hash(targetInstance, javaMethod, returnType, typeParameters); + } +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java index ce31db43eeff3..5ac5d5bf7847d 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java @@ -40,13 +40,15 @@ public final class PainlessLookup { private final Map painlessMethodKeysToImportedPainlessMethods; private final Map painlessMethodKeysToPainlessClassBindings; + private final Map painlessMethodKeysToPainlessInstanceBindings; PainlessLookup( Map> javaClassNamesToClasses, Map> canonicalClassNamesToClasses, Map, PainlessClass> classesToPainlessClasses, Map painlessMethodKeysToImportedPainlessMethods, - Map painlessMethodKeysToPainlessClassBindings) { + Map painlessMethodKeysToPainlessClassBindings, + Map painlessMethodKeysToPainlessInstanceBindings) { Objects.requireNonNull(javaClassNamesToClasses); Objects.requireNonNull(canonicalClassNamesToClasses); @@ -54,6 +56,7 @@ public final class PainlessLookup { Objects.requireNonNull(painlessMethodKeysToImportedPainlessMethods); Objects.requireNonNull(painlessMethodKeysToPainlessClassBindings); + Objects.requireNonNull(painlessMethodKeysToPainlessInstanceBindings); this.javaClassNamesToClasses = javaClassNamesToClasses; this.canonicalClassNamesToClasses = Collections.unmodifiableMap(canonicalClassNamesToClasses); @@ -61,6 +64,7 @@ public final class PainlessLookup { this.painlessMethodKeysToImportedPainlessMethods = Collections.unmodifiableMap(painlessMethodKeysToImportedPainlessMethods); this.painlessMethodKeysToPainlessClassBindings = Collections.unmodifiableMap(painlessMethodKeysToPainlessClassBindings); + this.painlessMethodKeysToPainlessInstanceBindings = 
Collections.unmodifiableMap(painlessMethodKeysToPainlessInstanceBindings); } public Class javaClassNameToClass(String javaClassName) { @@ -200,6 +204,14 @@ public PainlessClassBinding lookupPainlessClassBinding(String methodName, int ar return painlessMethodKeysToPainlessClassBindings.get(painlessMethodKey); } + public PainlessInstanceBinding lookupPainlessInstanceBinding(String methodName, int arity) { + Objects.requireNonNull(methodName); + + String painlessMethodKey = buildPainlessMethodKey(methodName, arity); + + return painlessMethodKeysToPainlessInstanceBindings.get(painlessMethodKey); + } + public PainlessMethod lookupFunctionalInterfacePainlessMethod(Class targetClass) { PainlessClass targetPainlessClass = classesToPainlessClasses.get(targetClass); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java index 552ad56f68a63..495a4ea94c962 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java @@ -24,6 +24,7 @@ import org.elasticsearch.painless.spi.WhitelistClassBinding; import org.elasticsearch.painless.spi.WhitelistConstructor; import org.elasticsearch.painless.spi.WhitelistField; +import org.elasticsearch.painless.spi.WhitelistInstanceBinding; import org.elasticsearch.painless.spi.WhitelistMethod; import java.lang.invoke.MethodHandle; @@ -50,10 +51,11 @@ public final class PainlessLookupBuilder { - private static final Map painlessConstructorCache = new HashMap<>(); - private static final Map painlessMethodCache = new HashMap<>(); - private static final Map painlessFieldCache = new HashMap<>(); - private static final Map painlessClassBindingCache = new HashMap<>(); + private static final Map painlessConstructorCache = new HashMap<>(); + private static 
final Map painlessMethodCache = new HashMap<>(); + private static final Map painlessFieldCache = new HashMap<>(); + private static final Map painlessClassBindingCache = new HashMap<>(); + private static final Map painlessInstanceBindingCache = new HashMap<>(); private static final Pattern CLASS_NAME_PATTERN = Pattern.compile("^[_a-zA-Z][._a-zA-Z0-9]*$"); private static final Pattern METHOD_NAME_PATTERN = Pattern.compile("^[_a-zA-Z][_a-zA-Z0-9]*$"); @@ -108,9 +110,15 @@ public static PainlessLookup buildFromWhitelists(List whitelists) { for (WhitelistClassBinding whitelistClassBinding : whitelist.whitelistClassBindings) { origin = whitelistClassBinding.origin; painlessLookupBuilder.addPainlessClassBinding( - whitelist.classLoader, whitelistClassBinding.targetJavaClassName, - whitelistClassBinding.methodName, whitelistClassBinding.returnCanonicalTypeName, - whitelistClassBinding.canonicalTypeNameParameters); + whitelist.classLoader, whitelistClassBinding.targetJavaClassName, whitelistClassBinding.methodName, + whitelistClassBinding.returnCanonicalTypeName, whitelistClassBinding.canonicalTypeNameParameters); + } + + for (WhitelistInstanceBinding whitelistInstanceBinding : whitelist.whitelistInstanceBindings) { + origin = whitelistInstanceBinding.origin; + painlessLookupBuilder.addPainlessInstanceBinding( + whitelistInstanceBinding.targetInstance, whitelistInstanceBinding.methodName, + whitelistInstanceBinding.returnCanonicalTypeName, whitelistInstanceBinding.canonicalTypeNameParameters); } } } catch (Exception exception) { @@ -134,6 +142,7 @@ public static PainlessLookup buildFromWhitelists(List whitelists) { private final Map painlessMethodKeysToImportedPainlessMethods; private final Map painlessMethodKeysToPainlessClassBindings; + private final Map painlessMethodKeysToPainlessInstanceBindings; public PainlessLookupBuilder() { javaClassNamesToClasses = new HashMap<>(); @@ -142,6 +151,7 @@ public PainlessLookupBuilder() { painlessMethodKeysToImportedPainlessMethods = 
new HashMap<>(); painlessMethodKeysToPainlessClassBindings = new HashMap<>(); + painlessMethodKeysToPainlessInstanceBindings = new HashMap<>(); } private Class canonicalTypeNameToType(String canonicalTypeName) { @@ -763,6 +773,10 @@ public void addImportedPainlessMethod(Class targetClass, String methodName, C throw new IllegalArgumentException("imported method and class binding cannot have the same name [" + methodName + "]"); } + if (painlessMethodKeysToPainlessInstanceBindings.containsKey(painlessMethodKey)) { + throw new IllegalArgumentException("imported method and instance binding cannot have the same name [" + methodName + "]"); + } + MethodHandle methodHandle; try { @@ -783,7 +797,7 @@ public void addImportedPainlessMethod(Class targetClass, String methodName, C painlessMethodKeysToImportedPainlessMethods.put(painlessMethodKey, newImportedPainlessMethod); } else if (newImportedPainlessMethod.equals(existingImportedPainlessMethod) == false) { throw new IllegalArgumentException("cannot add imported methods with the same name and arity " + - "but are not equivalent for methods " + + "but do not have equivalent methods " + "[[" + targetCanonicalClassName + "], [" + methodName + "], " + "[" + typeToCanonicalTypeName(returnType) + "], " + typesToCanonicalTypeNames(typeParameters) + "] and " + @@ -942,6 +956,11 @@ public void addPainlessClassBinding(Class targetClass, String methodName, Cla } } + if (isValidType(returnType) == false) { + throw new IllegalArgumentException("return type [" + typeToCanonicalTypeName(returnType) + "] not found for class binding " + + "[[" + targetCanonicalClassName + "], [" + methodName + "], " + typesToCanonicalTypeNames(typeParameters) + "]"); + } + if (javaMethod.getReturnType() != typeToJavaType(returnType)) { throw new IllegalArgumentException("return type [" + typeToCanonicalTypeName(javaMethod.getReturnType()) + "] " + "does not match the specified returned type [" + typeToCanonicalTypeName(returnType) + "] " + @@ -955,6 +974,15 
@@ public void addPainlessClassBinding(Class targetClass, String methodName, Cla throw new IllegalArgumentException("class binding and imported method cannot have the same name [" + methodName + "]"); } + if (painlessMethodKeysToPainlessInstanceBindings.containsKey(painlessMethodKey)) { + throw new IllegalArgumentException("class binding and instance binding cannot have the same name [" + methodName + "]"); + } + + if (Modifier.isStatic(javaMethod.getModifiers())) { + throw new IllegalArgumentException("class binding [[" + targetClass.getCanonicalName() + "], [" + methodName + "], " + + typesToCanonicalTypeNames(typeParameters) + "] cannot be static"); + } + PainlessClassBinding existingPainlessClassBinding = painlessMethodKeysToPainlessClassBindings.get(painlessMethodKey); PainlessClassBinding newPainlessClassBinding = new PainlessClassBinding(javaConstructor, javaMethod, returnType, typeParameters); @@ -962,9 +990,9 @@ public void addPainlessClassBinding(Class targetClass, String methodName, Cla if (existingPainlessClassBinding == null) { newPainlessClassBinding = painlessClassBindingCache.computeIfAbsent(newPainlessClassBinding, key -> key); painlessMethodKeysToPainlessClassBindings.put(painlessMethodKey, newPainlessClassBinding); - } else if (newPainlessClassBinding.equals(existingPainlessClassBinding)) { + } else if (newPainlessClassBinding.equals(existingPainlessClassBinding) == false) { throw new IllegalArgumentException("cannot add class bindings with the same name and arity " + - "but are not equivalent for methods " + + "but do not have equivalent methods " + "[[" + targetCanonicalClassName + "], " + "[" + methodName + "], " + "[" + typeToCanonicalTypeName(returnType) + "], " + @@ -976,6 +1004,136 @@ public void addPainlessClassBinding(Class targetClass, String methodName, Cla } } + public void addPainlessInstanceBinding(Object targetInstance, + String methodName, String returnCanonicalTypeName, List canonicalTypeNameParameters) { + + 
Objects.requireNonNull(targetInstance); + Objects.requireNonNull(methodName); + Objects.requireNonNull(returnCanonicalTypeName); + Objects.requireNonNull(canonicalTypeNameParameters); + + Class targetClass = targetInstance.getClass(); + String targetCanonicalClassName = typeToCanonicalTypeName(targetClass); + List> typeParameters = new ArrayList<>(canonicalTypeNameParameters.size()); + + for (String canonicalTypeNameParameter : canonicalTypeNameParameters) { + Class typeParameter = canonicalTypeNameToType(canonicalTypeNameParameter); + + if (typeParameter == null) { + throw new IllegalArgumentException("type parameter [" + canonicalTypeNameParameter + "] not found for instance binding " + + "[[" + targetCanonicalClassName + "], [" + methodName + "], " + canonicalTypeNameParameters + "]"); + } + + typeParameters.add(typeParameter); + } + + Class returnType = canonicalTypeNameToType(returnCanonicalTypeName); + + if (returnType == null) { + throw new IllegalArgumentException("return type [" + returnCanonicalTypeName + "] not found for class binding " + + "[[" + targetCanonicalClassName + "], [" + methodName + "], " + canonicalTypeNameParameters + "]"); + } + + addPainlessInstanceBinding(targetInstance, methodName, returnType, typeParameters); + } + + public void addPainlessInstanceBinding(Object targetInstance, String methodName, Class returnType, List> typeParameters) { + Objects.requireNonNull(targetInstance); + Objects.requireNonNull(methodName); + Objects.requireNonNull(returnType); + Objects.requireNonNull(typeParameters); + + Class targetClass = targetInstance.getClass(); + + if (targetClass == def.class) { + throw new IllegalArgumentException("cannot add instance binding as reserved class [" + DEF_CLASS_NAME + "]"); + } + + String targetCanonicalClassName = typeToCanonicalTypeName(targetClass); + Class existingTargetClass = javaClassNamesToClasses.get(targetClass.getName()); + + if (existingTargetClass == null) { + 
javaClassNamesToClasses.put(targetClass.getName(), targetClass); + } else if (existingTargetClass != targetClass) { + throw new IllegalArgumentException("class [" + targetCanonicalClassName + "] " + + "cannot represent multiple java classes with the same name from different class loaders"); + } + + if (METHOD_NAME_PATTERN.matcher(methodName).matches() == false) { + throw new IllegalArgumentException( + "invalid method name [" + methodName + "] for instance binding [" + targetCanonicalClassName + "]."); + } + + int typeParametersSize = typeParameters.size(); + List> javaTypeParameters = new ArrayList<>(typeParametersSize); + + for (Class typeParameter : typeParameters) { + if (isValidType(typeParameter) == false) { + throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(typeParameter) + "] " + + "not found for instance binding [[" + targetCanonicalClassName + "], [" + methodName + "], " + + typesToCanonicalTypeNames(typeParameters) + "]"); + } + + javaTypeParameters.add(typeToJavaType(typeParameter)); + } + + if (isValidType(returnType) == false) { + throw new IllegalArgumentException("return type [" + typeToCanonicalTypeName(returnType) + "] not found for imported method " + + "[[" + targetCanonicalClassName + "], [" + methodName + "], " + typesToCanonicalTypeNames(typeParameters) + "]"); + } + + Method javaMethod; + + try { + javaMethod = targetClass.getMethod(methodName, javaTypeParameters.toArray(new Class[typeParametersSize])); + } catch (NoSuchMethodException nsme) { + throw new IllegalArgumentException("instance binding reflection object [[" + targetCanonicalClassName + "], " + + "[" + methodName + "], " + typesToCanonicalTypeNames(typeParameters) + "] not found", nsme); + } + + if (javaMethod.getReturnType() != typeToJavaType(returnType)) { + throw new IllegalArgumentException("return type [" + typeToCanonicalTypeName(javaMethod.getReturnType()) + "] " + + "does not match the specified returned type [" + 
typeToCanonicalTypeName(returnType) + "] " + + "for instance binding [[" + targetClass.getCanonicalName() + "], [" + methodName + "], " + + typesToCanonicalTypeNames(typeParameters) + "]"); + } + + if (Modifier.isStatic(javaMethod.getModifiers())) { + throw new IllegalArgumentException("instance binding [[" + targetClass.getCanonicalName() + "], [" + methodName + "], " + + typesToCanonicalTypeNames(typeParameters) + "] cannot be static"); + } + + String painlessMethodKey = buildPainlessMethodKey(methodName, typeParametersSize); + + if (painlessMethodKeysToImportedPainlessMethods.containsKey(painlessMethodKey)) { + throw new IllegalArgumentException("instance binding and imported method cannot have the same name [" + methodName + "]"); + } + + if (painlessMethodKeysToPainlessClassBindings.containsKey(painlessMethodKey)) { + throw new IllegalArgumentException("instance binding and class binding cannot have the same name [" + methodName + "]"); + } + + PainlessInstanceBinding existingPainlessInstanceBinding = painlessMethodKeysToPainlessInstanceBindings.get(painlessMethodKey); + PainlessInstanceBinding newPainlessInstanceBinding = + new PainlessInstanceBinding(targetInstance, javaMethod, returnType, typeParameters); + + if (existingPainlessInstanceBinding == null) { + newPainlessInstanceBinding = painlessInstanceBindingCache.computeIfAbsent(newPainlessInstanceBinding, key -> key); + painlessMethodKeysToPainlessInstanceBindings.put(painlessMethodKey, newPainlessInstanceBinding); + } else if (newPainlessInstanceBinding.equals(existingPainlessInstanceBinding) == false) { + throw new IllegalArgumentException("cannot add instances bindings with the same name and arity " + + "but do not have equivalent methods " + + "[[" + targetCanonicalClassName + "], " + + "[" + methodName + "], " + + "[" + typeToCanonicalTypeName(returnType) + "], " + + typesToCanonicalTypeNames(typeParameters) + "] and " + + "[[" + targetCanonicalClassName + "], " + + "[" + methodName + "], " + + "[" + 
typeToCanonicalTypeName(existingPainlessInstanceBinding.returnType) + "], " + + typesToCanonicalTypeNames(existingPainlessInstanceBinding.typeParameters) + "]"); + } + } + public PainlessLookup build() { copyPainlessClassMembers(); cacheRuntimeHandles(); @@ -1003,8 +1161,13 @@ public PainlessLookup build() { "must have the same classes as the keys of classes to painless classes"); } - return new PainlessLookup(javaClassNamesToClasses, canonicalClassNamesToClasses, classesToPainlessClasses, - painlessMethodKeysToImportedPainlessMethods, painlessMethodKeysToPainlessClassBindings); + return new PainlessLookup( + javaClassNamesToClasses, + canonicalClassNamesToClasses, + classesToPainlessClasses, + painlessMethodKeysToImportedPainlessMethods, + painlessMethodKeysToPainlessClassBindings, + painlessMethodKeysToPainlessInstanceBindings); } private void copyPainlessClassMembers() { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECallLocal.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECallLocal.java index e613018dbc54c..2d49f4df6483c 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECallLocal.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECallLocal.java @@ -25,6 +25,7 @@ import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; import org.elasticsearch.painless.lookup.PainlessClassBinding; +import org.elasticsearch.painless.lookup.PainlessInstanceBinding; import org.elasticsearch.painless.lookup.PainlessMethod; import org.objectweb.asm.Label; import org.objectweb.asm.Type; @@ -48,6 +49,7 @@ public final class ECallLocal extends AExpression { private LocalMethod localMethod = null; private PainlessMethod importedMethod = null; private PainlessClassBinding classBinding = null; + private PainlessInstanceBinding instanceBinding = null; public ECallLocal(Location location, String name, List arguments) { 
super(location); @@ -74,8 +76,12 @@ void analyze(Locals locals) { classBinding = locals.getPainlessLookup().lookupPainlessClassBinding(name, arguments.size()); if (classBinding == null) { - throw createError( - new IllegalArgumentException("Unknown call [" + name + "] with [" + arguments.size() + "] arguments.")); + instanceBinding = locals.getPainlessLookup().lookupPainlessInstanceBinding(name, arguments.size()); + + if (instanceBinding == null) { + throw createError( + new IllegalArgumentException("Unknown call [" + name + "] with [" + arguments.size() + "] arguments.")); + } } } } @@ -91,6 +97,9 @@ void analyze(Locals locals) { } else if (classBinding != null) { typeParameters = new ArrayList<>(classBinding.typeParameters); actual = classBinding.returnType; + } else if (instanceBinding != null) { + typeParameters = new ArrayList<>(instanceBinding.typeParameters); + actual = instanceBinding.returnType; } else { throw new IllegalStateException("Illegal tree structure."); } @@ -125,7 +134,7 @@ void write(MethodWriter writer, Globals globals) { writer.invokeStatic(Type.getType(importedMethod.targetClass), new Method(importedMethod.javaMethod.getName(), importedMethod.methodType.toMethodDescriptorString())); } else if (classBinding != null) { - String name = globals.addBinding(classBinding.javaConstructor.getDeclaringClass()); + String name = globals.addClassBinding(classBinding.javaConstructor.getDeclaringClass()); Type type = Type.getType(classBinding.javaConstructor.getDeclaringClass()); int javaConstructorParameterCount = classBinding.javaConstructor.getParameterCount(); @@ -154,6 +163,18 @@ void write(MethodWriter writer, Globals globals) { } writer.invokeVirtual(type, Method.getMethod(classBinding.javaMethod)); + } else if (instanceBinding != null) { + String name = globals.addInstanceBinding(instanceBinding.targetInstance); + Type type = Type.getType(instanceBinding.targetInstance.getClass()); + + writer.loadThis(); + writer.getStatic(CLASS_TYPE, name, type); 
+ + for (int argument = 0; argument < instanceBinding.javaMethod.getParameterCount(); ++argument) { + arguments.get(argument).write(writer, globals); + } + + writer.invokeVirtual(type, Method.getMethod(instanceBinding.javaMethod)); } else { throw new IllegalStateException("Illegal tree structure."); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java index 01946066af99a..ca20cf19a5f0b 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java @@ -164,7 +164,7 @@ void extractVariables(Set variables) { throw new IllegalStateException("Illegal tree structure."); } - public Map analyze(PainlessLookup painlessLookup) { + public void analyze(PainlessLookup painlessLookup) { Map methods = new HashMap<>(); for (SFunction function : functions) { @@ -180,8 +180,6 @@ public Map analyze(PainlessLookup painlessLookup) { Locals locals = Locals.newProgramScope(painlessLookup, methods.values()); analyze(locals); - - return locals.getMethods(); } @Override @@ -228,7 +226,7 @@ void analyze(Locals program) { } } - public void write() { + public Map write() { // Create the ClassWriter. 
int classFrames = ClassWriter.COMPUTE_FRAMES | ClassWriter.COMPUTE_MAXS; @@ -359,13 +357,20 @@ public void write() { clinit.endMethod(); } - // Write binding variables - for (Map.Entry> binding : globals.getBindings().entrySet()) { - String name = binding.getKey(); - String descriptor = Type.getType(binding.getValue()).getDescriptor(); + // Write class binding variables + for (Map.Entry> classBinding : globals.getClassBindings().entrySet()) { + String name = classBinding.getKey(); + String descriptor = Type.getType(classBinding.getValue()).getDescriptor(); visitor.visitField(Opcodes.ACC_PRIVATE, name, descriptor, null, null).visitEnd(); } + // Write instance binding variables + for (Map.Entry instanceBinding : globals.getInstanceBindings().entrySet()) { + String name = instanceBinding.getValue(); + String descriptor = Type.getType(instanceBinding.getKey().getClass()).getDescriptor(); + visitor.visitField(Opcodes.ACC_PUBLIC | Opcodes.ACC_STATIC, name, descriptor, null, null).visitEnd(); + } + // Write any needsVarName methods for used variables for (org.objectweb.asm.commons.Method needsMethod : scriptClassInfo.getNeedsMethods()) { String name = needsMethod.getName(); @@ -382,6 +387,15 @@ public void write() { visitor.visitEnd(); bytes = writer.toByteArray(); + + Map statics = new HashMap<>(); + statics.put("$LOCALS", mainMethod.getMethods()); + + for (Map.Entry instanceBinding : globals.getInstanceBindings().entrySet()) { + statics.put(instanceBinding.getValue(), instanceBinding.getKey()); + } + + return statics; } @Override diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.aggs.movfn.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.aggs.movfn.txt index a120b73820ada..87f6f7d9aeaf8 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.aggs.movfn.txt +++ 
b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.aggs.movfn.txt @@ -19,7 +19,7 @@ # This file contains a whitelist for the Moving Function pipeline aggregator in core -class org.elasticsearch.search.aggregations.pipeline.movfn.MovingFunctions { +class org.elasticsearch.search.aggregations.pipeline.MovingFunctions { double max(double[]) double min(double[]) double sum(double[]) diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BindingsTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BindingsTests.java index 167deb3a20bf9..3f3d589702a5e 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BindingsTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BindingsTests.java @@ -20,14 +20,32 @@ package org.elasticsearch.painless; import org.elasticsearch.painless.spi.Whitelist; +import org.elasticsearch.painless.spi.WhitelistInstanceBinding; import org.elasticsearch.script.ScriptContext; +import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Map; public class BindingsTests extends ScriptTestCase { + public static class InstanceBindingTestClass { + private int value; + + public InstanceBindingTestClass(int value) { + this.value = value; + } + + public void setInstanceBindingValue(int value) { + this.value = value; + } + + public int getInstanceBindingValue() { + return value; + } + } + public abstract static class BindingsTestScript { public static final String[] PARAMETERS = { "test", "bound" }; public abstract int execute(int test, int bound); @@ -40,15 +58,29 @@ public interface Factory { @Override protected Map, List> scriptContexts() { Map, List> contexts = super.scriptContexts(); - contexts.put(BindingsTestScript.CONTEXT, Whitelist.BASE_WHITELISTS); + List whitelists = new ArrayList<>(Whitelist.BASE_WHITELISTS); + + InstanceBindingTestClass instanceBindingTestClass = new 
InstanceBindingTestClass(1); + WhitelistInstanceBinding getter = new WhitelistInstanceBinding("test", instanceBindingTestClass, + "setInstanceBindingValue", "void", Collections.singletonList("int")); + WhitelistInstanceBinding setter = new WhitelistInstanceBinding("test", instanceBindingTestClass, + "getInstanceBindingValue", "int", Collections.emptyList()); + List instanceBindingsList = new ArrayList<>(); + instanceBindingsList.add(getter); + instanceBindingsList.add(setter); + Whitelist instanceBindingsWhitelist = new Whitelist(instanceBindingTestClass.getClass().getClassLoader(), + Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), instanceBindingsList); + whitelists.add(instanceBindingsWhitelist); + + contexts.put(BindingsTestScript.CONTEXT, whitelists); return contexts; } - public void testBasicBinding() { + public void testBasicClassBinding() { assertEquals(15, exec("testAddWithState(4, 5, 6, 0.0)")); } - public void testRepeatedBinding() { + public void testRepeatedClassBinding() { String script = "testAddWithState(4, 5, test, 0.0)"; BindingsTestScript.Factory factory = scriptEngine.compile(null, script, BindingsTestScript.CONTEXT, Collections.emptyMap()); BindingsTestScript executableScript = factory.newInstance(); @@ -58,7 +90,7 @@ public void testRepeatedBinding() { assertEquals(16, executableScript.execute(7, 0)); } - public void testBoundBinding() { + public void testBoundClassBinding() { String script = "testAddWithState(4, bound, test, 0.0)"; BindingsTestScript.Factory factory = scriptEngine.compile(null, script, BindingsTestScript.CONTEXT, Collections.emptyMap()); BindingsTestScript executableScript = factory.newInstance(); @@ -66,4 +98,21 @@ public void testBoundBinding() { assertEquals(10, executableScript.execute(5, 1)); assertEquals(9, executableScript.execute(4, 2)); } + + public void testInstanceBinding() { + String script = "getInstanceBindingValue() + test + bound"; + BindingsTestScript.Factory factory = 
scriptEngine.compile(null, script, BindingsTestScript.CONTEXT, Collections.emptyMap()); + BindingsTestScript executableScript = factory.newInstance(); + assertEquals(3, executableScript.execute(1, 1)); + + script = "setInstanceBindingValue(test + bound); getInstanceBindingValue()"; + factory = scriptEngine.compile(null, script, BindingsTestScript.CONTEXT, Collections.emptyMap()); + executableScript = factory.newInstance(); + assertEquals(4, executableScript.execute(-2, 6)); + + script = "getInstanceBindingValue() + test + bound"; + factory = scriptEngine.compile(null, script, BindingsTestScript.CONTEXT, Collections.emptyMap()); + executableScript = factory.newInstance(); + assertEquals(8, executableScript.execute(-2, 6)); + } } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java index ae33ebfb6e9c5..38c315b134417 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java @@ -19,6 +19,7 @@ package org.elasticsearch.painless; +import org.elasticsearch.painless.PainlessExecuteAction.PainlessTestScript; import org.elasticsearch.painless.lookup.PainlessLookupBuilder; import org.elasticsearch.painless.spi.Whitelist; import org.objectweb.asm.util.Textifier; @@ -31,7 +32,7 @@ final class Debugger { /** compiles source to bytecode, and returns debugging output */ static String toString(final String source) { - return toString(GenericElasticsearchScript.class, source, new CompilerSettings()); + return toString(PainlessTestScript.class, source, new CompilerSettings()); } /** compiles to bytecode, and returns debugging output */ diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java index 5a4c5de015bc1..19fece29e42b5 100644 
--- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java @@ -91,7 +91,7 @@ public Object exec(String script, Map vars, boolean picky) { public Object exec(String script, Map vars, Map compileParams, boolean picky) { // test for ambiguity errors before running the actual script if picky is true if (picky) { - ScriptClassInfo scriptClassInfo = new ScriptClassInfo(PAINLESS_LOOKUP, GenericElasticsearchScript.class); + ScriptClassInfo scriptClassInfo = new ScriptClassInfo(PAINLESS_LOOKUP, PainlessTestScript.class); CompilerSettings pickySettings = new CompilerSettings(); pickySettings.setPicky(true); pickySettings.setRegexesEnabled(CompilerSettings.REGEX_ENABLED.get(scriptEngineSettings())); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptedMetricAggContextsTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptedMetricAggContextsTests.java index 5c6fbc54667f2..2d33853b88f30 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptedMetricAggContextsTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptedMetricAggContextsTests.java @@ -19,17 +19,26 @@ package org.elasticsearch.painless; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.memory.MemoryIndex; import org.apache.lucene.search.Scorable; import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptedMetricAggContexts; +import org.elasticsearch.search.lookup.LeafSearchLookup; +import org.elasticsearch.search.lookup.SearchLookup; +import org.elasticsearch.search.lookup.SourceLookup; +import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import static 
org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + public class ScriptedMetricAggContextsTests extends ScriptTestCase { @Override protected Map, List> scriptContexts() { @@ -57,7 +66,7 @@ public void testInitBasic() { assertEquals(10, state.get("testField")); } - public void testMapBasic() { + public void testMapBasic() throws IOException { ScriptedMetricAggContexts.MapScript.Factory factory = scriptEngine.compile("test", "state.testField = 2*_score", ScriptedMetricAggContexts.MapScript.CONTEXT, Collections.emptyMap()); @@ -82,6 +91,32 @@ public void testMapBasic() { assertEquals(1.0, state.get("testField")); } + public void testMapSourceAccess() throws IOException { + ScriptedMetricAggContexts.MapScript.Factory factory = scriptEngine.compile("test", + "state.testField = params._source.three", ScriptedMetricAggContexts.MapScript.CONTEXT, Collections.emptyMap()); + + Map params = new HashMap<>(); + Map state = new HashMap<>(); + + MemoryIndex index = new MemoryIndex(); + // we don't need a real index, just need to construct a LeafReaderContext which cannot be mocked + LeafReaderContext leafReaderContext = index.createSearcher().getIndexReader().leaves().get(0); + + SearchLookup lookup = mock(SearchLookup.class); + LeafSearchLookup leafLookup = mock(LeafSearchLookup.class); + when(lookup.getLeafSearchLookup(leafReaderContext)).thenReturn(leafLookup); + SourceLookup sourceLookup = mock(SourceLookup.class); + when(leafLookup.asMap()).thenReturn(Collections.singletonMap("_source", sourceLookup)); + when(sourceLookup.get("three")).thenReturn(3); + ScriptedMetricAggContexts.MapScript.LeafFactory leafFactory = factory.newFactory(params, state, lookup); + ScriptedMetricAggContexts.MapScript script = leafFactory.newInstance(leafReaderContext); + + script.execute(); + + assert(state.containsKey("testField")); + assertEquals(3, state.get("testField")); + } + public void testCombineBasic() { ScriptedMetricAggContexts.CombineScript.Factory factory = 
scriptEngine.compile("test", "state.testField = params.initialVal; return state.testField + params.inc", ScriptedMetricAggContexts.CombineScript.CONTEXT, diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java index 12d57fab11d98..9a284a26978e8 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java @@ -21,10 +21,10 @@ import org.elasticsearch.painless.CompilerSettings; import org.elasticsearch.painless.FeatureTest; -import org.elasticsearch.painless.GenericElasticsearchScript; import org.elasticsearch.painless.Locals.Variable; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.Operation; +import org.elasticsearch.painless.PainlessExecuteAction.PainlessTestScript; import org.elasticsearch.painless.ScriptClassInfo; import org.elasticsearch.painless.antlr.Walker; import org.elasticsearch.painless.lookup.PainlessCast; @@ -897,7 +897,7 @@ private void assertToString(String expected, String code) { } private SSource walk(String code) { - ScriptClassInfo scriptClassInfo = new ScriptClassInfo(painlessLookup, GenericElasticsearchScript.class); + ScriptClassInfo scriptClassInfo = new ScriptClassInfo(painlessLookup, PainlessTestScript.class); CompilerSettings compilerSettings = new CompilerSettings(); compilerSettings.setRegexesEnabled(true); try { diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/10_basic.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/10_basic.yml index 6d008a484ee3f..e442b40ffb845 100644 --- a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/10_basic.yml +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/10_basic.yml @@ -1,6 +1,9 @@ # Integration 
tests for Painless Plugin # "Painless plugin loaded": + - skip: + reason: "contains is a newly added assertion" + features: contains - do: cluster.state: {} diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/30_search.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/30_search.yml index 9a43e1f9aa445..0c0e980d95a6f 100644 --- a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/30_search.yml +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/30_search.yml @@ -23,8 +23,8 @@ indices.refresh: {} - do: - index: test search: + index: test body: query: script: @@ -45,8 +45,8 @@ - match: { hits.hits.1.fields.sNum1.0: 3.0 } - do: - index: test search: + index: test body: query: script: @@ -70,8 +70,8 @@ - match: { hits.hits.1.fields.sNum1.0: 3.0 } - do: - index: test search: + index: test body: query: script: @@ -96,8 +96,8 @@ - match: { hits.hits.2.fields.sNum1.0: 3.0 } - do: - index: test search: + index: test body: query: script: @@ -127,8 +127,8 @@ indices.refresh: {} - do: - index: test search: + index: test body: query: function_score: @@ -149,8 +149,8 @@ - match: { hits.hits.1._id: "1" } - do: - index: test search: + index: test body: query: function_score: @@ -171,8 +171,8 @@ - match: { hits.hits.1._id: "2" } - do: - index: test search: + index: test body: query: function_score: @@ -193,8 +193,8 @@ - match: { hits.hits.1._id: "1" } - do: - index: test search: + index: test body: query: function_score: @@ -215,8 +215,8 @@ - match: { hits.hits.1._id: "1" } - do: - index: test search: + index: test body: query: function_score: @@ -237,8 +237,8 @@ - match: { hits.hits.1._id: "1" } - do: - index: test search: + index: test body: query: function_score: @@ -274,8 +274,8 @@ indices.refresh: {} - do: - index: test search: + index: test body: query: function_score: @@ -325,8 +325,8 @@ - do: - index: test search: + index: test body: query: function_score: @@ -364,8 +364,8 @@ - 
do: - index: test search: + index: test body: script_fields: foobar: @@ -391,8 +391,8 @@ - do: - index: test search: + index: test body: aggs: value_agg: @@ -428,8 +428,8 @@ - do: catch: bad_request - index: test search: + index: test body: aggs: genre: diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java index f5e234f66ca57..87c7c07007eb7 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java @@ -120,7 +120,7 @@ public void testResponseOnSearchFailure() throws Exception { assertThat(e.getMessage(), either(containsString("all shards failed")) .or(containsString("No search context found")) - .or(containsString("no such index")) + .or(containsString("no such index [source]")) ); return; } diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/25_no_auto_create.yml b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/25_no_auto_create.yml index 874174fda4c93..961084a5c04cb 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/25_no_auto_create.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/25_no_auto_create.yml @@ -19,7 +19,7 @@ teardown: transient: action.auto_create_index: false - do: - catch: /no such index and \[action.auto_create_index\] is \[false\]/ + catch: /no such index \[dest\] and \[action.auto_create_index\] is \[false\]/ reindex: body: source: @@ -41,7 +41,7 @@ teardown: id: 1 body: { "text": "test" } - do: - catch: /no such index and \[action.auto_create_index\] \(\[test\]\) doesn't match/ + catch: /no such index \[dest\] and \[action.auto_create_index\] \(\[test\]\) doesn't match/ reindex: body: source: @@ -63,7 +63,7 @@ teardown: id: 1 body: { "text": "test" } - do: - catch: /no such index and 
\[action.auto_create_index\] contains \[-dest\] which forbids automatic creation of the index/ + catch: /no such index \[dest\] and \[action.auto_create_index\] contains \[-dest\] which forbids automatic creation of the index/ reindex: body: source: diff --git a/modules/repository-url/src/test/resources/rest-api-spec/test/repository_url/10_basic.yml b/modules/repository-url/src/test/resources/rest-api-spec/test/repository_url/10_basic.yml index b8181040665a1..1adbfc73bc7b8 100644 --- a/modules/repository-url/src/test/resources/rest-api-spec/test/repository_url/10_basic.yml +++ b/modules/repository-url/src/test/resources/rest-api-spec/test/repository_url/10_basic.yml @@ -103,6 +103,9 @@ teardown: --- "Module repository-url is loaded": + - skip: + reason: "contains is a newly added assertion" + features: contains - do: cluster.state: {} diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4HeadBodyIsEmptyIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4HeadBodyIsEmptyIT.java index 17b374ecb3765..185916ffe3c59 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4HeadBodyIsEmptyIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4HeadBodyIsEmptyIT.java @@ -38,7 +38,6 @@ import static org.hamcrest.Matchers.greaterThan; public class Netty4HeadBodyIsEmptyIT extends ESRestTestCase { - public void testHeadRoot() throws IOException { headTestCase("/", emptyMap(), greaterThan(0)); headTestCase("/", singletonMap("pretty", ""), greaterThan(0)); @@ -75,6 +74,12 @@ public void testIndexExists() throws IOException { headTestCase("/test", singletonMap("pretty", "true"), greaterThan(0)); } + @Override + protected boolean getStrictDeprecationMode() { + // Remove this override when we remove the reference to types below + return false; + } + public void testTypeExists() throws IOException { createTestDoc(); headTestCase("/test/_mapping/test", emptyMap(), greaterThan(0)); 
diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java index 4e63727024fb0..e7faac8ae01db 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java @@ -73,7 +73,7 @@ protected Version getCurrentVersion() { } }; MockTransportService mockTransportService = - MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, clusterSettings, Collections.emptySet()); + MockTransportService.createNewService(settings, transport, version, threadPool, clusterSettings, Collections.emptySet()); mockTransportService.start(); return mockTransportService; } diff --git a/modules/transport-netty4/src/test/resources/rest-api-spec/test/10_basic.yml b/modules/transport-netty4/src/test/resources/rest-api-spec/test/10_basic.yml index e8b23fa71408b..19728c7d34cff 100644 --- a/modules/transport-netty4/src/test/resources/rest-api-spec/test/10_basic.yml +++ b/modules/transport-netty4/src/test/resources/rest-api-spec/test/10_basic.yml @@ -1,6 +1,9 @@ # Integration tests for Netty transport # "Netty loaded": + - skip: + reason: "contains is a newly added assertion" + features: contains - do: cluster.state: {} diff --git a/plugins/discovery-azure-classic/src/test/resources/rest-api-spec/test/discovery_azure_classic/10_basic.yml b/plugins/discovery-azure-classic/src/test/resources/rest-api-spec/test/discovery_azure_classic/10_basic.yml index 6d12da177ea66..39aa9929f8a92 100644 --- a/plugins/discovery-azure-classic/src/test/resources/rest-api-spec/test/discovery_azure_classic/10_basic.yml +++ b/plugins/discovery-azure-classic/src/test/resources/rest-api-spec/test/discovery_azure_classic/10_basic.yml @@ -1,6 +1,9 @@ # Integration tests 
for Azure Classic Discovery component # "Discovery Azure Classic loaded": + - skip: + reason: "contains is a newly added assertion" + features: contains - do: cluster.state: {} diff --git a/plugins/discovery-ec2/src/test/resources/rest-api-spec/test/discovery_ec2/10_basic.yml b/plugins/discovery-ec2/src/test/resources/rest-api-spec/test/discovery_ec2/10_basic.yml index 3c5866663b94b..ba51c623fe888 100644 --- a/plugins/discovery-ec2/src/test/resources/rest-api-spec/test/discovery_ec2/10_basic.yml +++ b/plugins/discovery-ec2/src/test/resources/rest-api-spec/test/discovery_ec2/10_basic.yml @@ -1,6 +1,9 @@ # Integration tests for Discovery EC2 component # "Discovery EC2 loaded": + - skip: + reason: "contains is a newly added assertion" + features: contains - do: cluster.state: {} diff --git a/plugins/discovery-gce/src/test/resources/rest-api-spec/test/discovery_gce/10_basic.yml b/plugins/discovery-gce/src/test/resources/rest-api-spec/test/discovery_gce/10_basic.yml index f16599c40fa32..a5379c2c68bed 100644 --- a/plugins/discovery-gce/src/test/resources/rest-api-spec/test/discovery_gce/10_basic.yml +++ b/plugins/discovery-gce/src/test/resources/rest-api-spec/test/discovery_gce/10_basic.yml @@ -1,6 +1,9 @@ # Integration tests for Discovery GCE components # "Discovery GCE loaded": + - skip: + reason: "contains is a newly added assertion" + features: contains - do: cluster.state: {} diff --git a/plugins/examples/custom-suggester/src/test/resources/rest-api-spec/test/custom-suggester/10_basic.yml b/plugins/examples/custom-suggester/src/test/resources/rest-api-spec/test/custom-suggester/10_basic.yml index 29fbcdac99dd3..ed8d0f78a092b 100644 --- a/plugins/examples/custom-suggester/src/test/resources/rest-api-spec/test/custom-suggester/10_basic.yml +++ b/plugins/examples/custom-suggester/src/test/resources/rest-api-spec/test/custom-suggester/10_basic.yml @@ -1,6 +1,9 @@ # tests that the custom suggester plugin is installed --- "plugin loaded": + - skip: + reason: "contains is 
a newly added assertion" + features: contains - do: cluster.state: {} diff --git a/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/ExampleWhitelistExtension.java b/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/ExampleWhitelistExtension.java index ca35db5a81b3d..d0b03708a08b3 100644 --- a/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/ExampleWhitelistExtension.java +++ b/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/ExampleWhitelistExtension.java @@ -21,10 +21,12 @@ import org.elasticsearch.painless.spi.PainlessExtension; import org.elasticsearch.painless.spi.Whitelist; +import org.elasticsearch.painless.spi.WhitelistInstanceBinding; import org.elasticsearch.painless.spi.WhitelistLoader; import org.elasticsearch.script.FieldScript; import org.elasticsearch.script.ScriptContext; +import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; @@ -37,6 +39,14 @@ public class ExampleWhitelistExtension implements PainlessExtension { @Override public Map, List> getContextWhitelists() { - return Collections.singletonMap(FieldScript.CONTEXT, Collections.singletonList(WHITELIST)); + ExampleWhitelistedInstance ewi = new ExampleWhitelistedInstance(1); + WhitelistInstanceBinding addValue = new WhitelistInstanceBinding("example addValue", ewi, + "addValue", "int", Collections.singletonList("int")); + WhitelistInstanceBinding getValue = new WhitelistInstanceBinding("example getValue", ewi, + "getValue", "int", Collections.emptyList()); + Whitelist instanceWhitelist = new Whitelist(ewi.getClass().getClassLoader(), Collections.emptyList(), + Collections.emptyList(), Collections.emptyList(), Arrays.asList(addValue, getValue)); + + return Collections.singletonMap(FieldScript.CONTEXT, Arrays.asList(WHITELIST, instanceWhitelist)); } } diff --git 
a/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/ExampleWhitelistedInstance.java b/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/ExampleWhitelistedInstance.java new file mode 100644 index 0000000000000..1d48cecb9b45d --- /dev/null +++ b/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/ExampleWhitelistedInstance.java @@ -0,0 +1,36 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.example.painlesswhitelist; + +public class ExampleWhitelistedInstance { + private final int value; + + public ExampleWhitelistedInstance(int value) { + this.value = value; + } + + public int addValue(int value) { + return this.value + value; + } + + public int getValue() { + return value; + } +} diff --git a/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/10_basic.yml b/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/10_basic.yml index a915c08067e5c..1b8870582375d 100644 --- a/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/10_basic.yml +++ b/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/10_basic.yml @@ -1,6 +1,9 @@ # Integration tests for the painless whitelist example plugin # "Plugin loaded": + - skip: + reason: "contains is a newly added assertion" + features: contains - do: cluster.state: {} diff --git a/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/20_whitelist.yml b/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/20_whitelist.yml index bbb0b44ef1d45..b864edaa2a962 100644 --- a/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/20_whitelist.yml +++ b/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/20_whitelist.yml @@ -11,8 +11,8 @@ indices.refresh: {} - do: - index: test search: + index: test body: query: match_all: {} diff --git a/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/30_static.yml b/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/30_static.yml index b659263729607..1dbaf655bf501 100644 --- 
a/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/30_static.yml +++ b/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/30_static.yml @@ -11,8 +11,8 @@ indices.refresh: {} - do: - index: test search: + index: test body: query: match_all: {} diff --git a/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/40_instance.yml b/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/40_instance.yml new file mode 100644 index 0000000000000..712294baa6d7f --- /dev/null +++ b/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/40_instance.yml @@ -0,0 +1,41 @@ +# Example tests using an instance binding + +" custom instance binding": +- do: + index: + index: test + type: test + id: 1 + body: { "num1": 1 } +- do: + indices.refresh: {} + +- do: + search: + index: test + body: + query: + match_all: {} + script_fields: + sNum1: + script: + source: "addValue((int)doc['num1'][0])" + lang: painless + +- match: { hits.total: 1 } +- match: { hits.hits.0.fields.sNum1.0: 2 } + +- do: + search: + index: test + body: + query: + match_all: {} + script_fields: + sNum1: + script: + source: "getValue() + doc['num1'][0]" + lang: painless + +- match: { hits.total: 1 } +- match: { hits.hits.0.fields.sNum1.0: 2 } diff --git a/plugins/examples/rescore/src/test/resources/rest-api-spec/test/example-rescore/10_basic.yml b/plugins/examples/rescore/src/test/resources/rest-api-spec/test/example-rescore/10_basic.yml index 62a47df9d7869..f0d0bcb35fad9 100644 --- a/plugins/examples/rescore/src/test/resources/rest-api-spec/test/example-rescore/10_basic.yml +++ b/plugins/examples/rescore/src/test/resources/rest-api-spec/test/example-rescore/10_basic.yml @@ -1,6 +1,9 @@ # Integration tests for the expert scoring script example plugin # "Plugin loaded": + - skip: + reason: "contains is a newly added 
assertion" + features: contains - do: cluster.state: {} diff --git a/plugins/examples/script-expert-scoring/src/test/resources/rest-api-spec/test/script_expert_scoring/10_basic.yml b/plugins/examples/script-expert-scoring/src/test/resources/rest-api-spec/test/script_expert_scoring/10_basic.yml index 26980a95b730b..70842d5e767e5 100644 --- a/plugins/examples/script-expert-scoring/src/test/resources/rest-api-spec/test/script_expert_scoring/10_basic.yml +++ b/plugins/examples/script-expert-scoring/src/test/resources/rest-api-spec/test/script_expert_scoring/10_basic.yml @@ -1,6 +1,9 @@ # Integration tests for the expert scoring script example plugin # "Plugin loaded": + - skip: + reason: "contains is a newly added assertion" + features: contains - do: cluster.state: {} diff --git a/plugins/ingest-attachment/src/test/resources/rest-api-spec/test/ingest_attachment/10_basic.yml b/plugins/ingest-attachment/src/test/resources/rest-api-spec/test/ingest_attachment/10_basic.yml index 42be90f77f944..607fa5bf8b781 100644 --- a/plugins/ingest-attachment/src/test/resources/rest-api-spec/test/ingest_attachment/10_basic.yml +++ b/plugins/ingest-attachment/src/test/resources/rest-api-spec/test/ingest_attachment/10_basic.yml @@ -1,4 +1,7 @@ "Ingest attachment plugin installed": + - skip: + reason: "contains is a newly added assertion" + features: contains - do: cluster.state: {} diff --git a/plugins/ingest-geoip/src/test/resources/rest-api-spec/test/ingest_geoip/10_basic.yml b/plugins/ingest-geoip/src/test/resources/rest-api-spec/test/ingest_geoip/10_basic.yml index 413745eab4051..ef6346d425645 100644 --- a/plugins/ingest-geoip/src/test/resources/rest-api-spec/test/ingest_geoip/10_basic.yml +++ b/plugins/ingest-geoip/src/test/resources/rest-api-spec/test/ingest_geoip/10_basic.yml @@ -1,4 +1,7 @@ "Ingest plugin installed": + - skip: + reason: "contains is a newly added assertion" + features: contains - do: cluster.state: {} diff --git 
a/plugins/ingest-user-agent/src/test/resources/rest-api-spec/test/ingest-useragent/10_basic.yml b/plugins/ingest-user-agent/src/test/resources/rest-api-spec/test/ingest-useragent/10_basic.yml index 4cb1c9b1fba20..8fef34604b75d 100644 --- a/plugins/ingest-user-agent/src/test/resources/rest-api-spec/test/ingest-useragent/10_basic.yml +++ b/plugins/ingest-user-agent/src/test/resources/rest-api-spec/test/ingest-useragent/10_basic.yml @@ -1,4 +1,7 @@ "ingest-user-agent plugin installed": + - skip: + reason: "contains is a newly added assertion" + features: contains - do: cluster.state: {} diff --git a/plugins/repository-azure/src/test/resources/rest-api-spec/test/repository_azure/10_basic.yml b/plugins/repository-azure/src/test/resources/rest-api-spec/test/repository_azure/10_basic.yml index 199d543dda87e..fe21a295e37bb 100644 --- a/plugins/repository-azure/src/test/resources/rest-api-spec/test/repository_azure/10_basic.yml +++ b/plugins/repository-azure/src/test/resources/rest-api-spec/test/repository_azure/10_basic.yml @@ -1,6 +1,9 @@ # Integration tests for repository-azure # "Plugin repository-azure is loaded": + - skip: + reason: "contains is a newly added assertion" + features: contains - do: cluster.state: {} diff --git a/plugins/repository-gcs/src/test/resources/rest-api-spec/test/repository_gcs/10_basic.yml b/plugins/repository-gcs/src/test/resources/rest-api-spec/test/repository_gcs/10_basic.yml index 5c8fa70bb7a5f..072836280b3bc 100644 --- a/plugins/repository-gcs/src/test/resources/rest-api-spec/test/repository_gcs/10_basic.yml +++ b/plugins/repository-gcs/src/test/resources/rest-api-spec/test/repository_gcs/10_basic.yml @@ -1,6 +1,9 @@ # Integration tests for repository-gcs # "Plugin repository-gcs is loaded": + - skip: + reason: "contains is a newly added assertion" + features: contains - do: cluster.state: {} diff --git a/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/10_basic.yml 
b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/10_basic.yml index f11e0148402cf..bc419d75ba773 100644 --- a/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/10_basic.yml +++ b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/10_basic.yml @@ -3,6 +3,9 @@ # Check plugin is installed # "Plugin loaded": + - skip: + reason: "contains is a newly added assertion" + features: contains - do: cluster.state: {} diff --git a/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/10_basic.yml b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/10_basic.yml index f11e0148402cf..bc419d75ba773 100644 --- a/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/10_basic.yml +++ b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/10_basic.yml @@ -3,6 +3,9 @@ # Check plugin is installed # "Plugin loaded": + - skip: + reason: "contains is a newly added assertion" + features: contains - do: cluster.state: {} diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 3895500e55b50..888a9842833a1 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -4,6 +4,7 @@ import org.elasticsearch.gradle.MavenFilteringHack import org.elasticsearch.gradle.test.AntFixture import org.elasticsearch.gradle.test.ClusterConfiguration import org.elasticsearch.gradle.test.RestIntegTestTask +import com.carrotsearch.gradle.junit4.RandomizedTestingTask import java.lang.reflect.Field @@ -68,11 +69,14 @@ bundlePlugin { } } -additionalTest('testRepositoryCreds'){ +task testRepositoryCreds(type: RandomizedTestingTask) { include '**/RepositoryCredentialsTests.class' include '**/S3BlobStoreRepositoryTests.class' systemProperty 'es.allow_insecure_settings', 'true' + classpath = tasks.test.classpath + testClassesDirs = 
tasks.test.testClassesDirs } +project.check.dependsOn(testRepositoryCreds) test { // these are tested explicitly in separate test tasks diff --git a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/10_basic.yml b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/10_basic.yml index 190a628f0b375..cde14321805f5 100644 --- a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/10_basic.yml +++ b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/10_basic.yml @@ -1,6 +1,9 @@ # Integration tests for repository-s3 # "Plugin repository-s3 is loaded": + - skip: + reason: "contains is a newly added assertion" + features: contains - do: cluster.state: {} diff --git a/plugins/store-smb/src/test/resources/rest-api-spec/test/store_smb/10_basic.yml b/plugins/store-smb/src/test/resources/rest-api-spec/test/store_smb/10_basic.yml index 60228c1b92356..8956b3a8c116f 100644 --- a/plugins/store-smb/src/test/resources/rest-api-spec/test/store_smb/10_basic.yml +++ b/plugins/store-smb/src/test/resources/rest-api-spec/test/store_smb/10_basic.yml @@ -1,6 +1,9 @@ # Integration tests for SMB Store component # "SMB Store loaded": + - skip: + reason: "contains is a newly added assertion" + features: contains - do: cluster.state: {} diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java index 8f6d78b481ddf..33d40b9f735fa 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java @@ -77,7 +77,7 @@ protected Version getCurrentVersion() { } }; MockTransportService mockTransportService = - MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, clusterSettings, 
Collections.emptySet()); + MockTransportService.createNewService(settings, transport, version, threadPool, clusterSettings, Collections.emptySet()); mockTransportService.start(); return mockTransportService; } diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 6df1854cc22aa..6f878d24c871a 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -60,6 +60,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.startsWith; /** * Tests to run before and after a full cluster restart. This is run twice, @@ -75,7 +76,7 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { private String index; @Before - public void setIndex() { + public void setIndex() throws IOException { index = getTestName().toLowerCase(Locale.ROOT); } @@ -283,7 +284,8 @@ public void testClusterState() throws Exception { if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); mappingsAndSettings.startObject(); - mappingsAndSettings.field("template", index); + mappingsAndSettings.field("index_patterns", index); + mappingsAndSettings.field("order", "1000"); { mappingsAndSettings.startObject("settings"); mappingsAndSettings.field("number_of_shards", 1); @@ -323,6 +325,7 @@ public void testClusterState() throws Exception { } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/34853") public void testShrink() throws IOException { String shrunkenIndex = index + "_shrunk"; int numDocs; @@ -361,6 +364,7 @@ public void testShrink() throws IOException { client().performRequest(updateSettingsRequest); Request 
shrinkIndexRequest = new Request("PUT", "/" + index + "/_shrink/" + shrunkenIndex); + shrinkIndexRequest.addParameter("copy_settings", "true"); shrinkIndexRequest.setJsonEntity("{\"settings\": {\"index.number_of_shards\": 1}}"); client().performRequest(shrinkIndexRequest); @@ -844,7 +848,7 @@ public void testSnapshotRestore() throws IOException { // Stick a template into the cluster so we can see it after the restore XContentBuilder templateBuilder = JsonXContent.contentBuilder().startObject(); - templateBuilder.field("template", "evil_*"); // Don't confuse other tests by applying the template + templateBuilder.field("index_patterns", "evil_*"); // Don't confuse other tests by applying the template templateBuilder.startObject("settings"); { templateBuilder.field("number_of_shards", 1); } @@ -949,9 +953,23 @@ private void checkSnapshot(String snapshotName, int count, Version tookOnVersion assertEquals(singletonList(tookOnVersion.toString()), XContentMapValues.extractValue("snapshots.version", listSnapshotResponse)); // Remove the routing setting and template so we can test restoring them. - Request clearRoutingFromSettings = new Request("PUT", "/_cluster/settings"); - clearRoutingFromSettings.setJsonEntity("{\"persistent\":{\"cluster.routing.allocation.exclude.test_attr\": null}}"); - client().performRequest(clearRoutingFromSettings); + try { + Request clearRoutingFromSettings = new Request("PUT", "/_cluster/settings"); + clearRoutingFromSettings.setJsonEntity("{\"persistent\":{\"cluster.routing.allocation.exclude.test_attr\": null}}"); + client().performRequest(clearRoutingFromSettings); + } catch (ResponseException e) { + if (e.getResponse().hasWarnings() + && (isRunningAgainstOldCluster() == false || getOldClusterVersion().onOrAfter(Version.V_6_5_0))) { + e.getResponse().getWarnings().stream().forEach(warning -> { + assertThat(warning, containsString( + "setting was deprecated in Elasticsearch and will be removed in a future release! 
" + + "See the breaking changes documentation for the next major version.")); + assertThat(warning, startsWith("[search.remote.")); + }); + } else { + throw e; + } + } client().performRequest(new Request("DELETE", "/_template/test_template")); // Restore diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java index 2b7250f86b7cd..c3cd8f61538fe 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java @@ -196,7 +196,7 @@ public void testQueryBuilderBWC() throws Exception { QueryBuilder expectedQueryBuilder = (QueryBuilder) CANDIDATES.get(i)[1]; Request request = new Request("GET", "/" + index + "/_search"); request.setJsonEntity("{\"query\": {\"ids\": {\"values\": [\"" + Integer.toString(i) + "\"]}}, " + - "\"docvalue_fields\" : [\"query.query_builder_field\"]}"); + "\"docvalue_fields\": [{\"field\":\"query.query_builder_field\", \"format\":\"use_field_mapping\"}]}"); Response rsp = client().performRequest(request); assertEquals(200, rsp.getStatusLine().getStatusCode()); Map hitRsp = (Map) ((List) ((Map)toMap(rsp).get("hits")).get("hits")).get(0); diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java index 3898746e5c374..0b186db0f7a9f 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java @@ -20,12 +20,18 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.Version; import org.elasticsearch.client.Request; import 
org.elasticsearch.client.Response; import java.io.IOException; import java.nio.charset.StandardCharsets; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.equalTo; + /** * Basic test that indexed documents survive the rolling restart. See * {@link RecoveryIT} for much more in depth testing of the mechanism @@ -60,6 +66,26 @@ public void testIndexing() throws IOException { } if (CLUSTER_TYPE == ClusterType.OLD) { + { + Version minimumIndexCompatibilityVersion = Version.CURRENT.minimumIndexCompatibilityVersion(); + assertThat("this branch is not needed if we aren't compatible with 6.0", + minimumIndexCompatibilityVersion.onOrBefore(Version.V_6_0_0), equalTo(true)); + if (minimumIndexCompatibilityVersion.before(Version.V_7_0_0_alpha1)) { + XContentBuilder template = jsonBuilder(); + template.startObject(); + { + template.field("index_patterns", "*"); + template.startObject("settings"); + template.field("number_of_shards", 5); + template.endObject(); + } + template.endObject(); + Request createTemplate = new Request("PUT", "/_template/template"); + createTemplate.setJsonEntity(Strings.toString(template)); + client().performRequest(createTemplate); + } + } + Request createTestIndex = new Request("PUT", "/test_index"); createTestIndex.setJsonEntity("{\"settings\": {\"index.number_of_replicas\": 0}}"); client().performRequest(createTestIndex); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/10_basic.yml index dc7242f28754a..536cb28d5484b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/10_basic.yml @@ -68,6 +68,7 @@ - skip: version: " - 6.99.99" + features: headers reason: include_type_name was introduced in 7.0.0 - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/11_basic_with_types.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/11_basic_with_types.yml index 233ff32b4184b..7e763cded3185 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/11_basic_with_types.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/11_basic_with_types.yml @@ -58,6 +58,8 @@ --- "empty action": + - skip: + features: headers - do: catch: /Malformed action\/metadata line \[3\], expected FIELD_NAME but found \[END_OBJECT\]/ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yml index fb884ddfca2c8..a14423cef1154 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yml @@ -66,9 +66,9 @@ setup: "Least impact smoke test": # only passing these parameters to make sure they are consumed - do: - max_concurrent_shard_requests: 1 - max_concurrent_searches: 1 msearch: + max_concurrent_shard_requests: 1 + max_concurrent_searches: 1 body: - index: index_* - query: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.list/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.list/10_basic.yml index 57bf5b629b76a..4fdfc378bee26 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.list/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.list/10_basic.yml @@ -22,6 +22,7 @@ "tasks_list headers": - skip: version: " - 6.99.99" + features: headers reason: task headers has been added in 7.0.0 - do: diff --git a/server/build.gradle b/server/build.gradle index 85c7f45cf7efe..412e067782782 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -17,9 +17,7 @@ * under the License. 
*/ - import com.carrotsearch.gradle.junit4.RandomizedTestingTask -import org.elasticsearch.gradle.BuildPlugin apply plugin: 'elasticsearch.build' apply plugin: 'nebula.optional-base' @@ -322,7 +320,6 @@ if (isEclipse == false || project.path == ":server-tests") { group: JavaBasePlugin.VERIFICATION_GROUP, description: 'Multi-node tests', dependsOn: test.dependsOn) { - configure(BuildPlugin.commonTestConfig(project)) classpath = project.test.classpath testClassesDirs = project.test.testClassesDirs include '**/*IT.class' diff --git a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java index 9f7566662174a..923a76c0acb20 100644 --- a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java +++ b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java @@ -19,13 +19,13 @@ package org.elasticsearch; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexFormatTooOldException; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.index.Index; import org.elasticsearch.rest.RestStatus; @@ -47,7 +47,7 @@ public final class ExceptionsHelper { - private static final Logger logger = Loggers.getLogger(ExceptionsHelper.class); + private static final Logger logger = LogManager.getLogger(ExceptionsHelper.class); public static RuntimeException convertToRuntime(Exception e) { if (e instanceof RuntimeException) { diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 5a921098b4c8c..0f636f76d8ae5 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ 
b/server/src/main/java/org/elasticsearch/Version.java @@ -107,6 +107,8 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_6_4_3 = new Version(V_6_4_3_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); public static final int V_6_5_0_ID = 6050099; public static final Version V_6_5_0 = new Version(V_6_5_0_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); + public static final int V_6_6_0_ID = 6060099; + public static final Version V_6_6_0 = new Version(V_6_6_0_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); public static final int V_7_0_0_alpha1_ID = 7000001; public static final Version V_7_0_0_alpha1 = new Version(V_7_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); @@ -125,6 +127,8 @@ public static Version fromId(int id) { switch (id) { case V_7_0_0_alpha1_ID: return V_7_0_0_alpha1; + case V_6_6_0_ID: + return V_6_6_0; case V_6_5_0_ID: return V_6_5_0; case V_6_4_3_ID: diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java index d254f989d4a29..5b1b3dd2158ed 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.action.admin.indices.template.put; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; @@ -34,7 +35,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; @@ -69,7 +69,7 @@ */ public class PutIndexTemplateRequest extends MasterNodeRequest implements IndicesRequest, ToXContent { - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(PutIndexTemplateRequest.class)); + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(PutIndexTemplateRequest.class)); private String name; diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java index cf5f94a97380b..2f5db520088e9 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java @@ -18,10 +18,10 @@ */ package org.elasticsearch.action.bulk; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.threadpool.Scheduler; import java.util.concurrent.CountDownLatch; @@ -43,7 +43,7 @@ public final class BulkRequestHandler { BulkRequestHandler(BiConsumer> consumer, BackoffPolicy backoffPolicy, BulkProcessor.Listener listener, Scheduler scheduler, int concurrentRequests) { assert concurrentRequests >= 0; - this.logger = Loggers.getLogger(getClass()); + this.logger = LogManager.getLogger(getClass()); this.consumer = consumer; this.listener = listener; this.concurrentRequests = concurrentRequests; diff --git a/server/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java b/server/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java index d834d80338432..f530a81e51daa 100644 --- 
a/server/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java +++ b/server/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java @@ -73,10 +73,10 @@ public boolean shouldAutoCreate(String index, ClusterState state) { // One volatile read, so that all checks are done against the same instance: final AutoCreate autoCreate = this.autoCreate; if (autoCreate.autoCreateIndex == false) { - throw new IndexNotFoundException("no such index and [" + AUTO_CREATE_INDEX_SETTING.getKey() + "] is [false]", index); + throw new IndexNotFoundException("[" + AUTO_CREATE_INDEX_SETTING.getKey() + "] is [false]", index); } if (dynamicMappingDisabled) { - throw new IndexNotFoundException("no such index and [" + MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey() + "] is [false]", + throw new IndexNotFoundException("[" + MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey() + "] is [false]", index); } // matches not set, default value of "true" @@ -90,11 +90,11 @@ public boolean shouldAutoCreate(String index, ClusterState state) { if (include) { return true; } - throw new IndexNotFoundException("no such index and [" + AUTO_CREATE_INDEX_SETTING.getKey() + "] contains [-" + throw new IndexNotFoundException("[" + AUTO_CREATE_INDEX_SETTING.getKey() + "] contains [-" + indexExpression + "] which forbids automatic creation of the index", index); } } - throw new IndexNotFoundException("no such index and [" + AUTO_CREATE_INDEX_SETTING.getKey() + "] ([" + autoCreate + throw new IndexNotFoundException("[" + AUTO_CREATE_INDEX_SETTING.getKey() + "] ([" + autoCreate + "]) doesn't match", index); } diff --git a/server/src/main/java/org/elasticsearch/action/support/PlainListenableActionFuture.java b/server/src/main/java/org/elasticsearch/action/support/PlainListenableActionFuture.java index 943c36797096c..d99f2e620c1c3 100644 --- a/server/src/main/java/org/elasticsearch/action/support/PlainListenableActionFuture.java +++ 
b/server/src/main/java/org/elasticsearch/action/support/PlainListenableActionFuture.java @@ -19,10 +19,10 @@ package org.elasticsearch.action.support; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ListenableActionFuture; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.threadpool.ThreadPool; import java.util.ArrayList; @@ -123,7 +123,7 @@ private void executeListener(final ActionListener listener) { private static final class DispatchingListenableActionFuture extends PlainListenableActionFuture { - private static final Logger logger = Loggers.getLogger(DispatchingListenableActionFuture.class); + private static final Logger logger = LogManager.getLogger(DispatchingListenableActionFuture.class); private final ThreadPool threadPool; private DispatchingListenableActionFuture(ThreadPool threadPool) { diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index befe3a00ac1fa..e615dcf8aced0 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -95,7 +95,7 @@ public void run() { /** initialize native resources */ public static void initializeNatives(Path tmpFile, boolean mlockAll, boolean systemCallFilter, boolean ctrlHandler) { - final Logger logger = Loggers.getLogger(Bootstrap.class); + final Logger logger = LogManager.getLogger(Bootstrap.class); // check if the user is running as root, and bail if (Natives.definitelyRunningAsRoot()) { diff --git a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java index c5a8e806f41a4..0c433192ad6c0 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java +++ 
b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java @@ -19,12 +19,12 @@ package org.elasticsearch.bootstrap; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.util.Constants; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.PathUtils; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.discovery.DiscoveryModule; @@ -90,7 +90,7 @@ static void check( final BootstrapContext context, final boolean enforceLimits, final List checks) throws NodeValidationException { - check(context, enforceLimits, checks, Loggers.getLogger(BootstrapChecks.class)); + check(context, enforceLimits, checks, LogManager.getLogger(BootstrapChecks.class)); } /** @@ -417,7 +417,7 @@ public BootstrapCheckResult check(final BootstrapContext context) { // visible for testing long getMaxMapCount() { - return getMaxMapCount(Loggers.getLogger(BootstrapChecks.class)); + return getMaxMapCount(LogManager.getLogger(BootstrapChecks.class)); } // visible for testing diff --git a/server/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java b/server/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java index 1ef9b7740c205..1c3c0ccf6b61f 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java @@ -19,17 +19,17 @@ package org.elasticsearch.bootstrap; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.common.SuppressForbidden; -import 
org.elasticsearch.common.logging.Loggers; import java.io.IOError; import java.security.AccessController; import java.security.PrivilegedAction; class ElasticsearchUncaughtExceptionHandler implements Thread.UncaughtExceptionHandler { - private static final Logger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class); + private static final Logger logger = LogManager.getLogger(ElasticsearchUncaughtExceptionHandler.class); @Override public void uncaughtException(Thread t, Throwable e) { diff --git a/server/src/main/java/org/elasticsearch/bootstrap/JNACLibrary.java b/server/src/main/java/org/elasticsearch/bootstrap/JNACLibrary.java index 64dabe9236306..f510480cd5466 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/JNACLibrary.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/JNACLibrary.java @@ -22,9 +22,10 @@ import com.sun.jna.Native; import com.sun.jna.NativeLong; import com.sun.jna.Structure; + +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; -import org.elasticsearch.common.logging.Loggers; import java.util.Arrays; import java.util.List; @@ -34,7 +35,7 @@ */ final class JNACLibrary { - private static final Logger logger = Loggers.getLogger(JNACLibrary.class); + private static final Logger logger = LogManager.getLogger(JNACLibrary.class); public static final int MCL_CURRENT = 1; public static final int ENOMEM = 12; diff --git a/server/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java b/server/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java index 99574c2b39bb6..b843d39cbd113 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java @@ -26,9 +26,10 @@ import com.sun.jna.Structure; import com.sun.jna.WString; import com.sun.jna.win32.StdCallLibrary; + +import org.apache.logging.log4j.LogManager; import 
org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; -import org.elasticsearch.common.logging.Loggers; import java.util.ArrayList; import java.util.Arrays; @@ -41,7 +42,7 @@ */ final class JNAKernel32Library { - private static final Logger logger = Loggers.getLogger(JNAKernel32Library.class); + private static final Logger logger = LogManager.getLogger(JNAKernel32Library.class); // Callbacks must be kept around in order to be able to be called later, // when the Windows ConsoleCtrlHandler sends an event. diff --git a/server/src/main/java/org/elasticsearch/bootstrap/JNANatives.java b/server/src/main/java/org/elasticsearch/bootstrap/JNANatives.java index 4a40db846e0df..8e86f6aa4b779 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/JNANatives.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/JNANatives.java @@ -22,9 +22,10 @@ import com.sun.jna.Native; import com.sun.jna.Pointer; import com.sun.jna.WString; + +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.monitor.jvm.JvmInfo; import java.nio.file.Path; @@ -40,7 +41,7 @@ class JNANatives { /** no instantiation */ private JNANatives() {} - private static final Logger logger = Loggers.getLogger(JNANatives.class); + private static final Logger logger = LogManager.getLogger(JNANatives.class); // Set to true, in case native mlockall call was successful static boolean LOCAL_MLOCKALL = false; diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Natives.java b/server/src/main/java/org/elasticsearch/bootstrap/Natives.java index 9bd2a5c2f3dc7..10117698e9338 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Natives.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Natives.java @@ -19,8 +19,8 @@ package org.elasticsearch.bootstrap; +import org.apache.logging.log4j.LogManager; import 
org.apache.logging.log4j.Logger; -import org.elasticsearch.common.logging.Loggers; import java.nio.file.Path; @@ -32,7 +32,7 @@ final class Natives { /** no instantiation */ private Natives() {} - private static final Logger logger = Loggers.getLogger(Natives.class); + private static final Logger logger = LogManager.getLogger(Natives.class); // marker to determine if the JNA class files are available to the JVM static final boolean JNA_AVAILABLE; diff --git a/server/src/main/java/org/elasticsearch/bootstrap/SystemCallFilter.java b/server/src/main/java/org/elasticsearch/bootstrap/SystemCallFilter.java index c6667bee4cd2b..59f8bd5daf77b 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/SystemCallFilter.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/SystemCallFilter.java @@ -26,10 +26,11 @@ import com.sun.jna.Pointer; import com.sun.jna.Structure; import com.sun.jna.ptr.PointerByReference; + +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; import org.elasticsearch.core.internal.io.IOUtils; -import org.elasticsearch.common.logging.Loggers; import java.io.IOException; import java.nio.ByteBuffer; @@ -91,7 +92,7 @@ */ // not an example of how to write code!!! 
final class SystemCallFilter { - private static final Logger logger = Loggers.getLogger(SystemCallFilter.class); + private static final Logger logger = LogManager.getLogger(SystemCallFilter.class); // Linux implementation, based on seccomp(2) or prctl(2) with bpf filtering diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java index 7e2d92563035e..608e89514f25b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java @@ -20,6 +20,8 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; + +import org.apache.logging.log4j.LogManager; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; import org.elasticsearch.cluster.AbstractDiffable; @@ -33,7 +35,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.ToXContent; @@ -52,7 +53,7 @@ public class IndexTemplateMetaData extends AbstractDiffable { - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(IndexTemplateMetaData.class)); + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(IndexTemplateMetaData.class)); private final String name; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index bafbea2e72748..19c3de722793a 100644 --- 
a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -24,6 +24,7 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.action.AliasesRequest; import org.elasticsearch.cluster.ClusterState; @@ -42,7 +43,6 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -83,7 +83,7 @@ public class MetaData implements Iterable, Diffable, ToXContentFragment { - private static final Logger logger = Loggers.getLogger(MetaData.class); + private static final Logger logger = LogManager.getLogger(MetaData.class); public static final String ALL = "_all"; @@ -124,9 +124,11 @@ public enum XContentContext { public interface Custom extends NamedDiffable, ToXContentFragment, ClusterState.FeatureAware { EnumSet context(); - } + public static final Setting SETTING_CLUSTER_MAX_SHARDS_PER_NODE = + Setting.intSetting("cluster.max_shards_per_node", 1000, 1, Property.Dynamic, Property.NodeScope); + public static final Setting SETTING_READ_ONLY_SETTING = Setting.boolSetting("cluster.blocks.read_only", false, Property.Dynamic, Property.NodeScope); @@ -162,6 +164,7 @@ public interface Custom extends NamedDiffable, ToXContentFragment, Clust private final ImmutableOpenMap customs; private final transient int totalNumberOfShards; // Transient ? not serializable anyway? 
+ private final int totalOpenIndexShards; private final int numberOfShards; private final String[] allIndices; @@ -183,12 +186,17 @@ public interface Custom extends NamedDiffable, ToXContentFragment, Clust this.customs = customs; this.templates = templates; int totalNumberOfShards = 0; + int totalOpenIndexShards = 0; int numberOfShards = 0; for (ObjectCursor cursor : indices.values()) { totalNumberOfShards += cursor.value.getTotalNumberOfShards(); numberOfShards += cursor.value.getNumberOfShards(); + if (IndexMetaData.State.OPEN.equals(cursor.value.getState())) { + totalOpenIndexShards += cursor.value.getTotalNumberOfShards(); + } } this.totalNumberOfShards = totalNumberOfShards; + this.totalOpenIndexShards = totalOpenIndexShards; this.numberOfShards = numberOfShards; this.allIndices = allIndices; @@ -667,10 +675,29 @@ public T custom(String type) { } + /** + * Gets the total number of shards from all indices, including replicas and + * closed indices. + * @return The total number of shards from all indices. + */ public int getTotalNumberOfShards() { return this.totalNumberOfShards; } + /** + * Gets the total number of open shards from all indices. Includes + * replicas, but does not include shards that are part of closed indices. + * @return The total number of open shards from all indices. + */ + public int getTotalOpenIndexShards() { + return this.totalOpenIndexShards; + } + + /** + * Gets the number of primary shards from all indices, not including + * replicas. + * @return The number of primary shards from all indices.
+ */ public int getNumberOfShards() { return this.numberOfShards; } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 9466b03c442a0..c327da8afee22 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -53,6 +53,7 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -82,6 +83,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiFunction; @@ -587,12 +589,16 @@ public void onFailure(String source, Exception e) { private void validate(CreateIndexClusterStateUpdateRequest request, ClusterState state) { validateIndexName(request.index(), state); - validateIndexSettings(request.index(), request.settings(), forbidPrivateIndexSettings); + validateIndexSettings(request.index(), request.settings(), state, forbidPrivateIndexSettings); } - public void validateIndexSettings( - final String indexName, final Settings settings, final boolean forbidPrivateIndexSettings) throws IndexCreationException { + public void validateIndexSettings(String indexName, final Settings settings, final ClusterState clusterState, + final boolean forbidPrivateIndexSettings) throws IndexCreationException { List validationErrors = getIndexSettingsValidationErrors(settings, forbidPrivateIndexSettings); + + Optional shardAllocation = 
checkShardLimit(settings, clusterState, deprecationLogger); + shardAllocation.ifPresent(validationErrors::add); + if (validationErrors.isEmpty() == false) { ValidationException validationException = new ValidationException(); validationException.addValidationErrors(validationErrors); @@ -600,6 +606,21 @@ public void validateIndexSettings( } } + /** + * Checks whether an index can be created without going over the cluster shard limit. + * + * @param settings The settings of the index to be created. + * @param clusterState The current cluster state. + * @param deprecationLogger The logger to use to emit a deprecation warning, if appropriate. + * @return If present, an error message to be used to reject index creation. If empty, a signal that this operation may be carried out. + */ + static Optional checkShardLimit(Settings settings, ClusterState clusterState, DeprecationLogger deprecationLogger) { + int shardsToCreate = IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(settings) + * (1 + IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.get(settings)); + + return IndicesService.checkShardLimit(shardsToCreate, clusterState, deprecationLogger); + } + List getIndexSettingsValidationErrors(final Settings settings, final boolean forbidPrivateIndexSettings) { String customPath = IndexMetaData.INDEX_DATA_PATH_SETTING.get(settings); List validationErrors = new ArrayList<>(); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java index 038c03f342a34..f7482edd10d0e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java @@ -36,8 +36,10 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; +import 
org.elasticsearch.common.ValidationException; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.indices.IndicesService; @@ -50,6 +52,7 @@ import java.util.Arrays; import java.util.HashSet; import java.util.List; +import java.util.Optional; import java.util.Set; /** @@ -175,6 +178,8 @@ public ClusterState execute(ClusterState currentState) { } } + validateShardLimit(currentState, request.indices(), deprecationLogger); + if (indicesToOpen.isEmpty()) { return currentState; } @@ -217,4 +222,33 @@ public ClusterState execute(ClusterState currentState) { }); } + /** + * Validates whether a list of indices can be opened without going over the cluster shard limit. Only counts indices which are + * currently closed and will be opened, ignores indices which are already open. + * + * @param currentState The current cluster state. + * @param indices The indices which are to be opened. + * @param deprecationLogger The logger to use to emit a deprecation warning, if appropriate. + * @throws ValidationException If this operation would take the cluster over the limit and enforcement is enabled. 
+ */ + static void validateShardLimit(ClusterState currentState, Index[] indices, DeprecationLogger deprecationLogger) { + int shardsToOpen = Arrays.stream(indices) + .filter(index -> currentState.metaData().index(index).getState().equals(IndexMetaData.State.CLOSE)) + .mapToInt(index -> getTotalShardCount(currentState, index)) + .sum(); + + Optional error = IndicesService.checkShardLimit(shardsToOpen, currentState, deprecationLogger); + if (error.isPresent()) { + ValidationException ex = new ValidationException(); + ex.addValidationError(error.get()); + throw ex; + } + + } + + private static int getTotalShardCount(ClusterState state, Index index) { + IndexMetaData indexMetaData = state.metaData().index(index); + return indexMetaData.getNumberOfShards() * (1 + indexMetaData.getNumberOfReplicas()); + } + } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java index 75fcdced678ab..c89e6ddba9546 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java @@ -33,6 +33,7 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; +import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; @@ -45,9 +46,11 @@ import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; +import java.util.Arrays; import java.util.HashSet; import java.util.Locale; import java.util.Map; +import java.util.Optional; import java.util.Set; import static org.elasticsearch.action.support.ContextPreservingActionListener.wrapPreservingContext; @@ 
-115,6 +118,7 @@ protected ClusterStateUpdateResponse newResponse(boolean acknowledged) { @Override public ClusterState execute(ClusterState currentState) { + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(currentState.routingTable()); MetaData.Builder metaDataBuilder = MetaData.builder(currentState.metaData()); @@ -141,6 +145,18 @@ public ClusterState execute(ClusterState currentState) { int updatedNumberOfReplicas = openSettings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, -1); if (updatedNumberOfReplicas != -1 && preserveExisting == false) { + + // Verify that this won't take us over the cluster shard limit. + int totalNewShards = Arrays.stream(request.indices()) + .mapToInt(i -> getTotalNewShards(i, currentState, updatedNumberOfReplicas)) + .sum(); + Optional error = IndicesService.checkShardLimit(totalNewShards, currentState, deprecationLogger); + if (error.isPresent()) { + ValidationException ex = new ValidationException(); + ex.addValidationError(error.get()); + throw ex; + } + // we do *not* update the in sync allocation ids as they will be removed upon the first index // operation which make these copies stale // TODO: update the list once the data is deleted by the node? 
@@ -224,6 +240,14 @@ public ClusterState execute(ClusterState currentState) { }); } + private int getTotalNewShards(Index index, ClusterState currentState, int updatedNumberOfReplicas) { + IndexMetaData indexMetaData = currentState.metaData().index(index); + int shardsInIndex = indexMetaData.getNumberOfShards(); + int oldNumberOfReplicas = indexMetaData.getNumberOfReplicas(); + int replicaIncrease = updatedNumberOfReplicas - oldNumberOfReplicas; + return replicaIncrease * shardsInIndex; + } + /** * Updates the cluster block only iff the setting exists in the given settings */ diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/CancelAllocationCommand.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/CancelAllocationCommand.java index afeb0e0dab1d9..a167ced5bc271 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/CancelAllocationCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/CancelAllocationCommand.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation.command; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -32,7 +33,6 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.IndexNotFoundException; @@ -154,7 +154,7 @@ public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) discoNode + ", shard is primary and " + shardRouting.state().name().toLowerCase(Locale.ROOT)); } } - 
routingNodes.failShard(Loggers.getLogger(CancelAllocationCommand.class), shardRouting, + routingNodes.failShard(LogManager.getLogger(CancelAllocationCommand.class), shardRouting, new UnassignedInfo(UnassignedInfo.Reason.REROUTE_CANCELLED, null), indexMetaData, allocation.changes()); // TODO: We don't have to remove a cancelled shard from in-sync set once we have a strict resync implementation. allocation.removeAllocationId(shardRouting); diff --git a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java index 8927adfd43458..d720e9d603fe9 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.service; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.Assertions; @@ -38,7 +39,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.unit.TimeValue; @@ -553,7 +553,7 @@ public void onNodeAck(DiscoveryNode node, @Nullable Exception e) { private static class AckCountDownListener implements Discovery.AckListener { - private static final Logger logger = Loggers.getLogger(AckCountDownListener.class); + private static final Logger logger = LogManager.getLogger(AckCountDownListener.class); private final AckedClusterStateTaskListener ackedTaskListener; private final CountDown countDown; diff --git a/server/src/main/java/org/elasticsearch/common/inject/spi/Elements.java 
b/server/src/main/java/org/elasticsearch/common/inject/spi/Elements.java index 6d930953e29ba..c862ea2aa274f 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/spi/Elements.java +++ b/server/src/main/java/org/elasticsearch/common/inject/spi/Elements.java @@ -16,6 +16,7 @@ package org.elasticsearch.common.inject.spi; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.Binder; @@ -40,7 +41,6 @@ import org.elasticsearch.common.inject.internal.ProviderMethodsModule; import org.elasticsearch.common.inject.internal.SourceProvider; import org.elasticsearch.common.inject.matcher.Matcher; -import org.elasticsearch.common.logging.Loggers; import java.lang.annotation.Annotation; import java.util.ArrayList; @@ -338,7 +338,7 @@ public void annotatedWith(Annotation annotation) { return builder; } - private static Logger logger = Loggers.getLogger(Elements.class); + private static Logger logger = LogManager.getLogger(Elements.class); protected Object getSource() { Object ret; diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java b/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java index 5edc1659f54f7..6c55ef2e9343d 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java @@ -22,7 +22,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.Scorable; -import org.elasticsearch.script.ExplainableSearchScript; +import org.elasticsearch.script.ExplainableScoreScript; import org.elasticsearch.script.ScoreScript; import org.elasticsearch.script.Script; @@ -75,11 +75,11 @@ public double score(int docId, float subQueryScore) throws 
IOException { @Override public Explanation explainScore(int docId, Explanation subQueryScore) throws IOException { Explanation exp; - if (leafScript instanceof ExplainableSearchScript) { + if (leafScript instanceof ExplainableScoreScript) { leafScript.setDocument(docId); scorer.docid = docId; scorer.score = subQueryScore.getValue().floatValue(); - exp = ((ExplainableSearchScript) leafScript).explain(subQueryScore); + exp = ((ExplainableScoreScript) leafScript).explain(subQueryScore); } else { double score = score(docId, subQueryScore.getValue().floatValue()); String explanation = "script score function, computed with script:\"" + sScript + "\""; diff --git a/server/src/main/java/org/elasticsearch/common/network/IfConfig.java b/server/src/main/java/org/elasticsearch/common/network/IfConfig.java index a190643d3b43f..b728c24da20aa 100644 --- a/server/src/main/java/org/elasticsearch/common/network/IfConfig.java +++ b/server/src/main/java/org/elasticsearch/common/network/IfConfig.java @@ -19,8 +19,8 @@ package org.elasticsearch.common.network; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.logging.Loggers; import java.io.IOException; import java.net.Inet6Address; @@ -36,7 +36,7 @@ */ public final class IfConfig { - private static final Logger logger = Loggers.getLogger(IfConfig.class); + private static final Logger logger = LogManager.getLogger(IfConfig.class); private static final String INDENT = " "; /** log interface configuration at debug level, if its enabled */ diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 4b4ebb7414acb..66a4aa65c4480 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -196,6 +196,7 @@ public void apply(Settings value, Settings 
current, Settings previous) { MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING, MetaData.SETTING_READ_ONLY_SETTING, MetaData.SETTING_READ_ONLY_ALLOW_DELETE_SETTING, + MetaData.SETTING_CLUSTER_MAX_SHARDS_PER_NODE, RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING, RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING, RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING, diff --git a/server/src/main/java/org/elasticsearch/common/settings/Settings.java b/server/src/main/java/org/elasticsearch/common/settings/Settings.java index d350c26ce5acc..06bec217acf7f 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Settings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Settings.java @@ -21,6 +21,7 @@ import org.apache.logging.log4j.Level; import org.elasticsearch.core.internal.io.IOUtils; +import org.apache.logging.log4j.LogManager; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; @@ -30,7 +31,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.LogConfigurator; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.MemorySizeValue; @@ -346,7 +346,7 @@ public boolean hasValue(String key) { * {@link Setting} object constructed in, for example, {@link org.elasticsearch.env.Environment}. 
*/ static class DeprecationLoggerHolder { - static DeprecationLogger deprecationLogger = new DeprecationLogger(Loggers.getLogger(Settings.class)); + static DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(Settings.class)); } /** diff --git a/server/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java b/server/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java index 9d36b6f5ff6eb..0358f8f318de0 100644 --- a/server/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java +++ b/server/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java @@ -19,6 +19,7 @@ package org.elasticsearch.common.unit; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; import org.elasticsearch.common.Strings; @@ -26,7 +27,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -36,7 +36,7 @@ public class ByteSizeValue implements Writeable, Comparable, ToXContentFragment { - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(ByteSizeValue.class)); + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(ByteSizeValue.class)); public static final ByteSizeValue ZERO = new ByteSizeValue(0, ByteSizeUnit.BYTES); diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/LoggingDeprecationHandler.java b/server/src/main/java/org/elasticsearch/common/xcontent/LoggingDeprecationHandler.java index e075fb1711de8..5b92dec573df8 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/LoggingDeprecationHandler.java +++ 
b/server/src/main/java/org/elasticsearch/common/xcontent/LoggingDeprecationHandler.java @@ -19,9 +19,9 @@ package org.elasticsearch.common.xcontent; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; /** * Logs deprecations to the {@link DeprecationLogger}. @@ -42,7 +42,7 @@ public class LoggingDeprecationHandler implements DeprecationHandler { * Changing that will require some research to make super duper * sure it is safe. */ - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(ParseField.class)); + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(ParseField.class)); private LoggingDeprecationHandler() { // Singleton diff --git a/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java b/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java index 237b36b53d4bc..138f9501e6fe7 100644 --- a/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java +++ b/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.gateway; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.codecs.CodecUtil; @@ -30,7 +31,6 @@ import org.apache.lucene.store.OutputStreamIndexOutput; import org.apache.lucene.store.SimpleFSDirectory; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.store.IndexOutputOutputStream; import org.elasticsearch.common.lucene.store.InputStreamIndexInput; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; @@ -73,7 +73,7 @@ public abstract class MetaDataStateFormat { private final String prefix; 
private final Pattern stateFilePattern; - private static final Logger logger = Loggers.getLogger(MetaDataStateFormat.class); + private static final Logger logger = LogManager.getLogger(MetaDataStateFormat.class); /** * Creates a new {@link MetaDataStateFormat} instance diff --git a/server/src/main/java/org/elasticsearch/index/IndexNotFoundException.java b/server/src/main/java/org/elasticsearch/index/IndexNotFoundException.java index 4442ee276c9cf..2a4a89e3f9005 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexNotFoundException.java +++ b/server/src/main/java/org/elasticsearch/index/IndexNotFoundException.java @@ -28,7 +28,7 @@ public final class IndexNotFoundException extends ResourceNotFoundException { * Construct with a custom message. */ public IndexNotFoundException(String message, String index) { - super(message); + super("no such index [" + index + "] and " + message); setIndex(index); } @@ -37,7 +37,7 @@ public IndexNotFoundException(String index) { } public IndexNotFoundException(String index, Throwable cause) { - super("no such index", cause); + super("no such index [" + index + "]", cause); setIndex(index); } @@ -46,7 +46,7 @@ public IndexNotFoundException(Index index) { } public IndexNotFoundException(Index index, Throwable cause) { - super("no such index", cause); + super("no such index [" + index.getName() + "]", cause); setIndex(index); } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVOrdinalsIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVOrdinalsIndexFieldData.java index 4b3643dda059a..b71dcc7593417 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVOrdinalsIndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVOrdinalsIndexFieldData.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.fielddata.plain; +import org.apache.logging.log4j.LogManager; import 
org.apache.logging.log4j.Logger; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.LeafReaderContext; @@ -29,7 +30,6 @@ import org.apache.lucene.search.SortedSetSortField; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData; import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; @@ -50,7 +50,7 @@ public class SortedSetDVOrdinalsIndexFieldData extends DocValuesIndexFieldData i private final IndexFieldDataCache cache; private final CircuitBreakerService breakerService; private final Function> scriptFunction; - private static final Logger logger = Loggers.getLogger(SortedSetDVOrdinalsIndexFieldData.class); + private static final Logger logger = LogManager.getLogger(SortedSetDVOrdinalsIndexFieldData.class); public SortedSetDVOrdinalsIndexFieldData(IndexSettings indexSettings, IndexFieldDataCache cache, String fieldName, CircuitBreakerService breakerService, Function> scriptFunction) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java b/server/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java index 71a2cdb32f9df..1b81977a57205 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java @@ -19,9 +19,9 @@ package org.elasticsearch.index.mapper; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.Version; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -36,7 +36,7 @@ public class DynamicTemplate implements ToXContentObject 
{ - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(DynamicTemplate.class)); + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(DynamicTemplate.class)); public enum MatchType { SIMPLE { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 5d0239f846a1f..1bda015758736 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -22,6 +22,7 @@ import com.carrotsearch.hppc.ObjectHashSet; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.DelegatingAnalyzerWrapper; @@ -35,7 +36,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -117,7 +117,7 @@ public enum MergeReason { "_size", "_timestamp", "_ttl", IgnoredFieldMapper.NAME ); - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(MapperService.class)); + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(MapperService.class)); private final IndexAnalyzers indexAnalyzers; diff --git a/server/src/main/java/org/elasticsearch/index/query/TypeQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/TypeQueryBuilder.java index 9f1916fb719ed..cb8005ad26c38 100644 --- 
a/server/src/main/java/org/elasticsearch/index/query/TypeQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/TypeQueryBuilder.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.query; +import org.apache.logging.log4j.LogManager; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; @@ -28,7 +29,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.DocumentMapper; @@ -40,7 +40,7 @@ public class TypeQueryBuilder extends AbstractQueryBuilder { public static final String NAME = "type"; private static final ParseField VALUE_FIELD = new ParseField("value"); - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(TypeQueryBuilder.class)); + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(TypeQueryBuilder.class)); private final String type; diff --git a/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java b/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java index c0a13105f84e0..b5bdc05adfb73 100644 --- a/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java @@ -18,12 +18,12 @@ */ package org.elasticsearch.index.query.functionscore; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.Version; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import 
org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.search.function.RandomScoreFunction; import org.elasticsearch.common.lucene.search.function.ScoreFunction; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -40,7 +40,8 @@ */ public class RandomScoreFunctionBuilder extends ScoreFunctionBuilder { - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(RandomScoreFunctionBuilder.class)); + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger( + LogManager.getLogger(RandomScoreFunctionBuilder.class)); public static final String NAME = "random_score"; private String field; diff --git a/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java b/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java index 2713e5e2661da..18307e0a56812 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java @@ -51,7 +51,7 @@ * */ public class DeleteByQueryRequest extends AbstractBulkByScrollRequest - implements IndicesRequest.Replaceable, ToXContentObject { + implements IndicesRequest.Replaceable, ToXContentObject { public DeleteByQueryRequest() { this(new SearchRequest()); diff --git a/server/src/main/java/org/elasticsearch/index/reindex/WorkerBulkByScrollTaskState.java b/server/src/main/java/org/elasticsearch/index/reindex/WorkerBulkByScrollTaskState.java index 3a96259d04a8e..797d622756176 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/WorkerBulkByScrollTaskState.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/WorkerBulkByScrollTaskState.java @@ -19,8 +19,8 @@ package org.elasticsearch.index.reindex; +import org.apache.logging.log4j.LogManager; import 
org.apache.logging.log4j.Logger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.FutureUtils; @@ -43,7 +43,7 @@ */ public class WorkerBulkByScrollTaskState implements SuccessfullyProcessed { - private static final Logger logger = Loggers.getLogger(WorkerBulkByScrollTaskState.class); + private static final Logger logger = LogManager.getLogger(WorkerBulkByScrollTaskState.class); /** * Maximum wait time allowed for throttling. diff --git a/server/src/main/java/org/elasticsearch/index/shard/ElasticsearchMergePolicy.java b/server/src/main/java/org/elasticsearch/index/shard/ElasticsearchMergePolicy.java index 430e75ed49469..c6f28732b37c2 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/ElasticsearchMergePolicy.java +++ b/server/src/main/java/org/elasticsearch/index/shard/ElasticsearchMergePolicy.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.shard; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.index.FilterMergePolicy; import org.apache.lucene.index.IndexWriter; @@ -26,7 +27,6 @@ import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.index.SegmentInfos; import org.elasticsearch.Version; -import org.elasticsearch.common.logging.Loggers; import java.io.IOException; import java.util.Collections; @@ -46,7 +46,7 @@ */ public final class ElasticsearchMergePolicy extends FilterMergePolicy { - private static Logger logger = Loggers.getLogger(ElasticsearchMergePolicy.class); + private static Logger logger = LogManager.getLogger(ElasticsearchMergePolicy.class); // True if the next merge request should do segment upgrades: private volatile boolean upgradeInProgress; diff --git a/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java 
b/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java index 54c1dd7c1db69..9535108cad3c0 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java +++ b/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java @@ -21,6 +21,8 @@ import joptsimple.OptionParser; import joptsimple.OptionSet; import joptsimple.OptionSpec; + +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; @@ -45,7 +47,6 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.PathUtils; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -74,7 +75,7 @@ public class RemoveCorruptedShardDataCommand extends EnvironmentAwareCommand { - private static final Logger logger = Loggers.getLogger(RemoveCorruptedShardDataCommand.class); + private static final Logger logger = LogManager.getLogger(RemoveCorruptedShardDataCommand.class); private final OptionSpec folderOption; private final OptionSpec indexNameOption; diff --git a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java index 9aab1260b6b48..2de877551a96f 100644 --- a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java +++ b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.similarity; +import org.apache.logging.log4j.LogManager; import org.apache.lucene.search.similarities.AfterEffect; import org.apache.lucene.search.similarities.AfterEffectB; import 
org.apache.lucene.search.similarities.AfterEffectL; @@ -52,7 +53,6 @@ import org.apache.lucene.search.similarities.NormalizationZ; import org.elasticsearch.Version; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import java.util.Arrays; @@ -67,7 +67,7 @@ final class SimilarityProviders { private SimilarityProviders() {} // no instantiation - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(SimilarityProviders.class)); + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(SimilarityProviders.class)); static final String DISCOUNT_OVERLAPS = "discount_overlaps"; private static final Map BASIC_MODELS; diff --git a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java index 0b9c365509685..87600f4441bdc 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.translog; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; @@ -28,7 +29,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -55,7 +55,7 @@ public class TruncateTranslogAction { - protected static final Logger logger = Loggers.getLogger(TruncateTranslogAction.class); + protected static final 
Logger logger = LogManager.getLogger(TruncateTranslogAction.class); private final NamedXContentRegistry namedXContentRegistry; public TruncateTranslogAction(NamedXContentRegistry namedXContentRegistry) { diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 0e27d2bf1502f..206b9e7165ab0 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -38,6 +38,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.CheckedFunction; @@ -52,6 +53,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -156,6 +158,21 @@ public class IndicesService extends AbstractLifecycleComponent public static final String INDICES_SHARDS_CLOSED_TIMEOUT = "indices.shards_closed_timeout"; public static final Setting INDICES_CACHE_CLEAN_INTERVAL_SETTING = Setting.positiveTimeSetting("indices.cache.cleanup_interval", TimeValue.timeValueMinutes(1), Property.NodeScope); + private static final boolean ENFORCE_MAX_SHARDS_PER_NODE; + + static { + final String ENFORCE_SHARD_LIMIT_KEY = "es.enforce_max_shards_per_node"; + final String enforceMaxShardsPerNode = System.getProperty(ENFORCE_SHARD_LIMIT_KEY); + if (enforceMaxShardsPerNode == null) { + 
ENFORCE_MAX_SHARDS_PER_NODE = false; + } else if ("true".equals(enforceMaxShardsPerNode)) { + ENFORCE_MAX_SHARDS_PER_NODE = true; + } else { + throw new IllegalArgumentException(ENFORCE_SHARD_LIMIT_KEY + " may only be unset or set to [true] but was [" + + enforceMaxShardsPerNode + "]"); + } + } + private final PluginsService pluginsService; private final NodeEnvironment nodeEnv; private final NamedXContentRegistry xContentRegistry; @@ -1352,4 +1369,41 @@ public Function> getFieldFilter() { public boolean isMetaDataField(String field) { return mapperRegistry.isMetaDataField(field); } + + /** + * Checks to see if an operation can be performed without taking the cluster over the cluster-wide shard limit. Adds a deprecation + * warning or returns an error message as appropriate + * + * @param newShards The number of shards to be added by this operation + * @param state The current cluster state + * @param deprecationLogger The logger to use for deprecation warnings + * @return If present, an error message to be given as the reason for failing + * an operation. If empty, a sign that the operation is valid. 
+ */ + public static Optional checkShardLimit(int newShards, ClusterState state, DeprecationLogger deprecationLogger) { + Settings theseSettings = state.metaData().settings(); + int nodeCount = state.getNodes().getDataNodes().size(); + + // Only enforce the shard limit if we have at least one data node, so that we don't block + // index creation during cluster setup + if (nodeCount == 0 || newShards < 0) { + return Optional.empty(); + } + int maxShardsPerNode = MetaData.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.get(theseSettings); + int maxShardsInCluster = maxShardsPerNode * nodeCount; + int currentOpenShards = state.getMetaData().getTotalOpenIndexShards(); + + if ((currentOpenShards + newShards) > maxShardsInCluster) { + String errorMessage = "this action would add [" + newShards + "] total shards, but this cluster currently has [" + + currentOpenShards + "]/[" + maxShardsInCluster + "] maximum shards open"; + if (ENFORCE_MAX_SHARDS_PER_NODE) { + return Optional.of(errorMessage); + } else { + deprecationLogger.deprecated("In a future major version, this request will fail because {}. 
Before upgrading, " + + "reduce the number of shards in your cluster or adjust the cluster setting [{}].", + errorMessage, MetaData.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey()); + } + } + return Optional.empty(); + } } diff --git a/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java b/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java index 235d8b46e5b1a..eb2dc587bfb6d 100644 --- a/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java +++ b/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java @@ -19,11 +19,11 @@ package org.elasticsearch.indices.breaker; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.common.breaker.ChildMemoryCircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.breaker.NoopCircuitBreaker; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -325,7 +325,7 @@ public void registerBreaker(BreakerSettings breakerSettings) { } else { CircuitBreaker oldBreaker; CircuitBreaker breaker = new ChildMemoryCircuitBreaker(breakerSettings, - Loggers.getLogger(CHILD_LOGGER_PREFIX + breakerSettings.getName()), + LogManager.getLogger(CHILD_LOGGER_PREFIX + breakerSettings.getName()), this, breakerSettings.getName()); for (;;) { @@ -335,7 +335,7 @@ public void registerBreaker(BreakerSettings breakerSettings) { } breaker = new ChildMemoryCircuitBreaker(breakerSettings, (ChildMemoryCircuitBreaker)oldBreaker, - Loggers.getLogger(CHILD_LOGGER_PREFIX + breakerSettings.getName()), + LogManager.getLogger(CHILD_LOGGER_PREFIX + breakerSettings.getName()), this, breakerSettings.getName()); if 
(breakers.replace(breakerSettings.getName(), oldBreaker, breaker)) { diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmPid.java b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmPid.java index 2b1b2a1df478a..71472069069b8 100644 --- a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmPid.java +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmPid.java @@ -19,8 +19,8 @@ package org.elasticsearch.monitor.jvm; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.common.logging.Loggers; import java.lang.management.ManagementFactory; @@ -41,7 +41,7 @@ private static long initializePid() { try { return Long.parseLong(name.split("@")[0]); } catch (final NumberFormatException e) { - Loggers.getLogger(JvmPid.class).debug(new ParameterizedMessage("failed parsing PID from [{}]", name), e); + LogManager.getLogger(JvmPid.class).debug(new ParameterizedMessage("failed parsing PID from [{}]", name), e); return -1; } } diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 911063e86410b..875b8b2149db4 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -19,6 +19,7 @@ package org.elasticsearch.node; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; import org.apache.lucene.util.SetOnce; @@ -66,7 +67,6 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; @@ -263,7 +263,7 @@ public Node(Environment environment) 
{ */ protected Node( final Environment environment, Collection> classpathPlugins, boolean forbidPrivateIndexSettings) { - logger = Loggers.getLogger(Node.class); + logger = LogManager.getLogger(Node.class); final List resourcesToClose = new ArrayList<>(); // register everything we need to release in the case of an error boolean success = false; try { diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginsService.java b/server/src/main/java/org/elasticsearch/plugins/PluginsService.java index 6805a0e40d567..4ce489268714f 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginsService.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginsService.java @@ -19,8 +19,8 @@ package org.elasticsearch.plugins; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.analysis.util.CharFilterFactory; import org.apache.lucene.analysis.util.TokenFilterFactory; import org.apache.lucene.analysis.util.TokenizerFactory; @@ -37,7 +37,6 @@ import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -385,7 +384,7 @@ private static Set findBundles(final Path directory, String type) throws // get a bundle for a single plugin dir private static Bundle readPluginBundle(final Set bundles, final Path plugin, String type) throws IOException { - Loggers.getLogger(PluginsService.class).trace("--- adding [{}] [{}]", type, plugin.toAbsolutePath()); + LogManager.getLogger(PluginsService.class).trace("--- adding [{}] [{}]", type, plugin.toAbsolutePath()); final PluginInfo info; try { info = PluginInfo.readFromProperties(plugin); diff --git 
a/server/src/main/java/org/elasticsearch/plugins/SearchPlugin.java b/server/src/main/java/org/elasticsearch/plugins/SearchPlugin.java index b134db4451718..c8d4e2e6209d2 100644 --- a/server/src/main/java/org/elasticsearch/plugins/SearchPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/SearchPlugin.java @@ -42,8 +42,8 @@ import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristic; import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicParser; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.movavg.MovAvgPipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.movavg.models.MovAvgModel; +import org.elasticsearch.search.aggregations.pipeline.MovAvgPipelineAggregator; +import org.elasticsearch.search.aggregations.pipeline.MovAvgModel; import org.elasticsearch.search.fetch.FetchSubPhase; import org.elasticsearch.search.fetch.subphase.highlight.Highlighter; import org.elasticsearch.search.rescore.RescorerBuilder; diff --git a/server/src/main/java/org/elasticsearch/rest/action/RestActionListener.java b/server/src/main/java/org/elasticsearch/rest/action/RestActionListener.java index 572da497c1f2b..15e535ebfe201 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/RestActionListener.java +++ b/server/src/main/java/org/elasticsearch/rest/action/RestActionListener.java @@ -19,9 +19,9 @@ package org.elasticsearch.rest.action; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestChannel; @@ -33,7 +33,7 @@ public abstract class RestActionListener implements ActionListener { if (xContentParser != null) { // NOTE: if rest request with xcontent body has request parameters, 
values parsed from request body have the precedence @@ -65,10 +65,4 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC return channel -> client.clearScroll(clearRequest, new RestStatusToXContentListener<>(channel)); } - private static String[] splitScrollIds(String scrollIds) { - if (scrollIds == null) { - return Strings.EMPTY_ARRAY; - } - return Strings.splitStringByCommaToArray(scrollIds); - } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index 11c91703bdcdb..b677abcd79cfa 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.rest.action.search; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryBuilder; @@ -57,7 +57,7 @@ public class RestSearchAction extends BaseRestHandler { public static final String TYPED_KEYS_PARAM = "typed_keys"; private static final Set RESPONSE_PARAMS = Collections.singleton(TYPED_KEYS_PARAM); - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(RestSearchAction.class)); + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(RestSearchAction.class)); public RestSearchAction(Settings settings, RestController controller) { super(settings); diff --git 
a/server/src/main/java/org/elasticsearch/script/ExplainableSearchScript.java b/server/src/main/java/org/elasticsearch/script/ExplainableScoreScript.java similarity index 93% rename from server/src/main/java/org/elasticsearch/script/ExplainableSearchScript.java rename to server/src/main/java/org/elasticsearch/script/ExplainableScoreScript.java index c39702127510f..d181d8c67840c 100644 --- a/server/src/main/java/org/elasticsearch/script/ExplainableSearchScript.java +++ b/server/src/main/java/org/elasticsearch/script/ExplainableScoreScript.java @@ -43,11 +43,11 @@ import java.io.IOException; /** - * To be implemented by {@link SearchScript} which can provided an {@link Explanation} of the score + * To be implemented by {@link ScoreScript} which can provided an {@link Explanation} of the score * This is currently not used inside elasticsearch but it is used, see for example here: * https://github.com/elastic/elasticsearch/issues/8561 */ -public interface ExplainableSearchScript { +public interface ExplainableScoreScript { /** * Build the explanation of the current document being scored diff --git a/server/src/main/java/org/elasticsearch/script/FieldScript.java b/server/src/main/java/org/elasticsearch/script/FieldScript.java index 98649dbb33043..29684a6447776 100644 --- a/server/src/main/java/org/elasticsearch/script/FieldScript.java +++ b/server/src/main/java/org/elasticsearch/script/FieldScript.java @@ -94,7 +94,7 @@ public void setDocument(int docid) { leafLookup.setDocument(docid); } - /** A factory to construct {@link SearchScript} instances. */ + /** A factory to construct {@link FieldScript} instances. 
*/ public interface LeafFactory { FieldScript newInstance(LeafReaderContext ctx) throws IOException; } diff --git a/server/src/main/java/org/elasticsearch/script/ScoreScript.java b/server/src/main/java/org/elasticsearch/script/ScoreScript.java index 11b135e9a65af..5c533298cbe13 100644 --- a/server/src/main/java/org/elasticsearch/script/ScoreScript.java +++ b/server/src/main/java/org/elasticsearch/script/ScoreScript.java @@ -46,7 +46,8 @@ public abstract class ScoreScript { public ScoreScript(Map params, SearchLookup lookup, LeafReaderContext leafContext) { this.params = params; - this.leafLookup = lookup.getLeafSearchLookup(leafContext); + // null check needed b/c of expression engine subclass + this.leafLookup = lookup == null ? null : lookup.getLeafSearchLookup(leafContext); } public abstract double execute(); diff --git a/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java b/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java index 35a7c2e60d685..1ce88f7c711e6 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.script; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; @@ -30,7 +31,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -51,7 +51,7 @@ public final class ScriptMetaData implements MetaData.Custom, Writeable, ToXCont /** * Standard deprecation logger for used to deprecate allowance of empty templates. 
*/ - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(ScriptMetaData.class)); + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(ScriptMetaData.class)); /** * A builder used to modify the currently stored scripts data held within diff --git a/server/src/main/java/org/elasticsearch/script/ScriptModule.java b/server/src/main/java/org/elasticsearch/script/ScriptModule.java index 1c53ef133de86..8e9d162c52efa 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptModule.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptModule.java @@ -22,7 +22,7 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.ScriptPlugin; -import org.elasticsearch.search.aggregations.pipeline.movfn.MovingFunctionScript; +import org.elasticsearch.search.aggregations.pipeline.MovingFunctionScript; import java.util.Collections; import java.util.HashMap; diff --git a/server/src/main/java/org/elasticsearch/script/ScriptedMetricAggContexts.java b/server/src/main/java/org/elasticsearch/script/ScriptedMetricAggContexts.java index e72d597a6afb4..4c51b9fed69ec 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptedMetricAggContexts.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptedMetricAggContexts.java @@ -27,15 +27,18 @@ import org.elasticsearch.search.lookup.SearchLookup; import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; public class ScriptedMetricAggContexts { - private abstract static class ParamsAndStateBase { + + public abstract static class InitScript { private final Map params; private final Map state; - ParamsAndStateBase(Map params, Map state) { + public InitScript(Map params, Map state) { this.params = params; this.state = state; } @@ -47,12 +50,6 @@ public Map getParams() 
{ public Object getState() { return state; } - } - - public abstract static class InitScript extends ParamsAndStateBase { - public InitScript(Map params, Map state) { - super(params, state); - } public abstract void execute(); @@ -64,14 +61,51 @@ public interface Factory { public static ScriptContext CONTEXT = new ScriptContext<>("aggs_init", Factory.class); } - public abstract static class MapScript extends ParamsAndStateBase { + public abstract static class MapScript { + private static final Map DEPRECATIONS; + + static { + Map deprecations = new HashMap<>(); + deprecations.put( + "doc", + "Accessing variable [doc] via [params.doc] from within a scripted metric agg map script " + + "is deprecated in favor of directly accessing [doc]." + ); + deprecations.put( + "_doc", + "Accessing variable [doc] via [params._doc] from within a scripted metric agg map script " + + "is deprecated in favor of directly accessing [doc]." + ); + deprecations.put( + "_agg", + "Accessing variable [_agg] via [params._agg] from within a scripted metric agg map script " + + "is deprecated in favor of using [state]." + ); + DEPRECATIONS = Collections.unmodifiableMap(deprecations); + } + + private final Map params; + private final Map state; private final LeafSearchLookup leafLookup; private Scorable scorer; public MapScript(Map params, Map state, SearchLookup lookup, LeafReaderContext leafContext) { - super(params, state); - + this.state = state; this.leafLookup = leafContext == null ? 
null : lookup.getLeafSearchLookup(leafContext); + if (leafLookup != null) { + params = new HashMap<>(params); // copy params so we aren't modifying input + params.putAll(leafLookup.asMap()); // add lookup vars + params = new ParameterMap(params, DEPRECATIONS); // wrap with deprecations + } + this.params = params; + } + + public Map getParams() { + return params; + } + + public Map getState() { + return state; } // Return the doc as a map (instead of LeafDocLookup) in order to abide by type whitelisting rules for @@ -117,9 +151,21 @@ public interface Factory { public static ScriptContext CONTEXT = new ScriptContext<>("aggs_map", Factory.class); } - public abstract static class CombineScript extends ParamsAndStateBase { + public abstract static class CombineScript { + private final Map params; + private final Map state; + public CombineScript(Map params, Map state) { - super(params, state); + this.params = params; + this.state = state; + } + + public Map getParams() { + return params; + } + + public Map getState() { + return state; } public abstract Object execute(); diff --git a/server/src/main/java/org/elasticsearch/script/SearchScript.java b/server/src/main/java/org/elasticsearch/script/SearchScript.java deleted file mode 100644 index 2fd439564a61f..0000000000000 --- a/server/src/main/java/org/elasticsearch/script/SearchScript.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.script; - -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.Scorable; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.lucene.ScorerAware; -import org.elasticsearch.search.lookup.LeafDocLookup; -import org.elasticsearch.search.lookup.LeafSearchLookup; -import org.elasticsearch.search.lookup.SearchLookup; - -import java.io.IOException; -import java.util.Map; - -/** - * A generic script used for per document use cases. - * - * Using a {@link SearchScript} works as follows: - *
    - *
  1. Construct a {@link Factory} using {@link ScriptService#compile(Script, ScriptContext)}
  2. - *
  3. Construct a {@link LeafFactory} for a an index using {@link Factory#newFactory(Map, SearchLookup)}
  4. - *
  5. Construct a {@link SearchScript} for a Lucene segment using {@link LeafFactory#newInstance(LeafReaderContext)}
  6. - *
  7. Call {@link #setDocument(int)} to indicate which document in the segment the script should be run for next
  8. - *
  9. Call one of the {@code run} methods: {@link #run()} or {@link #runAsDouble()}
  10. - *
- */ -public abstract class SearchScript implements ScorerAware { - - /** The generic runtime parameters for the script. */ - private final Map params; - - /** A leaf lookup for the bound segment this script will operate on. */ - private final LeafSearchLookup leafLookup; - - /** A scorer that will return the score for the current document when the script is run. */ - private Scorable scorer; - - public SearchScript(Map params, SearchLookup lookup, LeafReaderContext leafContext) { - this.params = params; - // TODO: remove leniency when painless does not implement SearchScript for executable script cases - this.leafLookup = leafContext == null ? null : lookup.getLeafSearchLookup(leafContext); - } - - /** Return the parameters for this script. */ - public Map getParams() { - return params; - } - - /** The leaf lookup for the Lucene segment this script was created for. */ - protected final LeafSearchLookup getLeafLookup() { - return leafLookup; - } - - /** The doc lookup for the Lucene segment this script was created for. */ - public final LeafDocLookup getDoc() { - // TODO: remove leniency when painless does not implement SearchScript for executable script cases - return leafLookup == null ? null : leafLookup.doc(); - } - - /** Set the current document to run the script on next. */ - public void setDocument(int docid) { - // TODO: remove leniency when painless does not implement SearchScript for executable script cases - if (leafLookup != null) { - leafLookup.setDocument(docid); - } - } - - @Override - public void setScorer(Scorable scorer) { - this.scorer = scorer; - } - - /** Return the score of the current document. 
*/ - public double getScore() { - // TODO: remove leniency when painless does not implement SearchScript for executable script cases - if (scorer == null) { - return 0.0d; - } - try { - return scorer.score(); - } catch (IOException e) { - throw new ElasticsearchException("couldn't lookup score", e); - } - } - - /** - * Sets per-document aggregation {@code _value}. - *

- * The default implementation just calls {@code setNextVar("_value", value)} but - * some engines might want to handle this differently for better performance. - *

- * @param value per-document value, typically a String, Long, or Double - */ - public void setNextAggregationValue(Object value) { - setNextVar("_value", value); - } - - public void setNextVar(String field, Object value) {} - - - public Object run() { - return runAsDouble(); - } - - /** Return the result as a double. This is the main use case of search script, used for document scoring. */ - public abstract double runAsDouble(); - - /** A factory to construct {@link SearchScript} instances. */ - public interface LeafFactory { - SearchScript newInstance(LeafReaderContext ctx) throws IOException; - - /** - * Return {@code true} if the script needs {@code _score} calculated, or {@code false} otherwise. - */ - boolean needs_score(); - } - - /** A factory to construct stateful {@link SearchScript} factories for a specific index. */ - public interface Factory { - LeafFactory newFactory(Map params, SearchLookup lookup); - } - - /** The context used to compile {@link SearchScript} factories. */ - public static final ScriptContext CONTEXT = new ScriptContext<>("search", Factory.class); -} diff --git a/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java b/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java index aabef751fc76f..7a16c7ad2d51f 100644 --- a/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java +++ b/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java @@ -19,6 +19,7 @@ package org.elasticsearch.script; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.Diff; @@ -30,7 +31,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import 
org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ObjectParser; @@ -59,7 +59,7 @@ public class StoredScriptSource extends AbstractDiffable imp /** * Standard deprecation logger for used to deprecate allowance of empty templates. */ - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(StoredScriptSource.class)); + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(StoredScriptSource.class)); /** * Standard {@link ParseField} for outer level of stored script source. diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index 3032f618c2f30..47943a92b00a1 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -185,48 +185,48 @@ import org.elasticsearch.search.aggregations.metrics.WeightedAvgAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.InternalBucketMetricValue; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.avg.AvgBucketPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.avg.AvgBucketPipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.max.MaxBucketPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.max.MaxBucketPipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.min.MinBucketPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.min.MinBucketPipelineAggregator; -import 
org.elasticsearch.search.aggregations.pipeline.bucketmetrics.percentile.InternalPercentilesBucket; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.percentile.PercentilesBucketPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.percentile.PercentilesBucketPipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.InternalStatsBucket; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.StatsBucketPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.StatsBucketPipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.extended.ExtendedStatsBucketParser; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.extended.ExtendedStatsBucketPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.extended.ExtendedStatsBucketPipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.extended.InternalExtendedStatsBucket; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.sum.SumBucketPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.sum.SumBucketPipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketscript.BucketScriptPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.bucketscript.BucketScriptPipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketselector.BucketSelectorPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.bucketselector.BucketSelectorPipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketsort.BucketSortPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.bucketsort.BucketSortPipelineAggregator; -import 
org.elasticsearch.search.aggregations.pipeline.cumulativesum.CumulativeSumPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.cumulativesum.CumulativeSumPipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.derivative.DerivativePipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.derivative.DerivativePipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.derivative.InternalDerivative; -import org.elasticsearch.search.aggregations.pipeline.movavg.MovAvgPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.movavg.MovAvgPipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.movavg.models.EwmaModel; -import org.elasticsearch.search.aggregations.pipeline.movavg.models.HoltLinearModel; -import org.elasticsearch.search.aggregations.pipeline.movavg.models.HoltWintersModel; -import org.elasticsearch.search.aggregations.pipeline.movavg.models.LinearModel; -import org.elasticsearch.search.aggregations.pipeline.movavg.models.MovAvgModel; -import org.elasticsearch.search.aggregations.pipeline.movavg.models.SimpleModel; -import org.elasticsearch.search.aggregations.pipeline.movfn.MovFnPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.movfn.MovFnPipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.serialdiff.SerialDiffPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.serialdiff.SerialDiffPipelineAggregator; +import org.elasticsearch.search.aggregations.pipeline.InternalBucketMetricValue; +import org.elasticsearch.search.aggregations.pipeline.AvgBucketPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.AvgBucketPipelineAggregator; +import org.elasticsearch.search.aggregations.pipeline.MaxBucketPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.MaxBucketPipelineAggregator; +import 
org.elasticsearch.search.aggregations.pipeline.MinBucketPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.MinBucketPipelineAggregator; +import org.elasticsearch.search.aggregations.pipeline.InternalPercentilesBucket; +import org.elasticsearch.search.aggregations.pipeline.PercentilesBucketPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.PercentilesBucketPipelineAggregator; +import org.elasticsearch.search.aggregations.pipeline.InternalStatsBucket; +import org.elasticsearch.search.aggregations.pipeline.StatsBucketPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.StatsBucketPipelineAggregator; +import org.elasticsearch.search.aggregations.pipeline.ExtendedStatsBucketParser; +import org.elasticsearch.search.aggregations.pipeline.ExtendedStatsBucketPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.ExtendedStatsBucketPipelineAggregator; +import org.elasticsearch.search.aggregations.pipeline.InternalExtendedStatsBucket; +import org.elasticsearch.search.aggregations.pipeline.SumBucketPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.SumBucketPipelineAggregator; +import org.elasticsearch.search.aggregations.pipeline.BucketScriptPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.BucketScriptPipelineAggregator; +import org.elasticsearch.search.aggregations.pipeline.BucketSelectorPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.BucketSelectorPipelineAggregator; +import org.elasticsearch.search.aggregations.pipeline.BucketSortPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.BucketSortPipelineAggregator; +import org.elasticsearch.search.aggregations.pipeline.CumulativeSumPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.CumulativeSumPipelineAggregator; +import 
org.elasticsearch.search.aggregations.pipeline.DerivativePipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.DerivativePipelineAggregator; +import org.elasticsearch.search.aggregations.pipeline.InternalDerivative; +import org.elasticsearch.search.aggregations.pipeline.MovAvgPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.MovAvgPipelineAggregator; +import org.elasticsearch.search.aggregations.pipeline.EwmaModel; +import org.elasticsearch.search.aggregations.pipeline.HoltLinearModel; +import org.elasticsearch.search.aggregations.pipeline.HoltWintersModel; +import org.elasticsearch.search.aggregations.pipeline.LinearModel; +import org.elasticsearch.search.aggregations.pipeline.MovAvgModel; +import org.elasticsearch.search.aggregations.pipeline.SimpleModel; +import org.elasticsearch.search.aggregations.pipeline.MovFnPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.MovFnPipelineAggregator; +import org.elasticsearch.search.aggregations.pipeline.SerialDiffPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.SerialDiffPipelineAggregator; import org.elasticsearch.search.fetch.FetchPhase; import org.elasticsearch.search.fetch.FetchSubPhase; import org.elasticsearch.search.fetch.subphase.DocValueFieldsFetchSubPhase; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java b/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java index 9312f9720cd05..caea05f30e5b4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java @@ -18,12 +18,12 @@ */ package org.elasticsearch.search.aggregations; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.Version; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import 
org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.util.Comparators; import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -528,7 +528,7 @@ public static void writeHistogramOrder(BucketOrder order, StreamOutput out, bool public static class Parser { private static final DeprecationLogger DEPRECATION_LOGGER = - new DeprecationLogger(Loggers.getLogger(Parser.class)); + new DeprecationLogger(LogManager.getLogger(Parser.class)); /** * Parse a {@link BucketOrder} from {@link XContent}. diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregatorBuilders.java b/server/src/main/java/org/elasticsearch/search/aggregations/PipelineAggregatorBuilders.java similarity index 63% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregatorBuilders.java rename to server/src/main/java/org/elasticsearch/search/aggregations/PipelineAggregatorBuilders.java index ce87dd797d6e0..a5f89f82f6b35 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregatorBuilders.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/PipelineAggregatorBuilders.java @@ -17,24 +17,24 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline; +package org.elasticsearch.search.aggregations; import org.elasticsearch.script.Script; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.avg.AvgBucketPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.max.MaxBucketPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.min.MinBucketPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.percentile.PercentilesBucketPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.StatsBucketPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.extended.ExtendedStatsBucketPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.sum.SumBucketPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.bucketscript.BucketScriptPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.bucketselector.BucketSelectorPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.bucketsort.BucketSortPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.cumulativesum.CumulativeSumPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.derivative.DerivativePipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.movavg.MovAvgPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.movfn.MovFnPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.serialdiff.SerialDiffPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.AvgBucketPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.BucketScriptPipelineAggregationBuilder; +import 
org.elasticsearch.search.aggregations.pipeline.BucketSelectorPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.BucketSortPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.CumulativeSumPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.DerivativePipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.ExtendedStatsBucketPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.MaxBucketPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.MinBucketPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.MovAvgPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.MovFnPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.PercentilesBucketPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.SerialDiffPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.StatsBucketPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.SumBucketPipelineAggregationBuilder; import org.elasticsearch.search.sort.FieldSortBuilder; import java.util.List; @@ -69,13 +69,11 @@ public static StatsBucketPipelineAggregationBuilder statsBucket(String name, Str return new StatsBucketPipelineAggregationBuilder(name, bucketsPath); } - public static ExtendedStatsBucketPipelineAggregationBuilder extendedStatsBucket(String name, - String bucketsPath) { + public static ExtendedStatsBucketPipelineAggregationBuilder extendedStatsBucket(String name, String bucketsPath) { return new ExtendedStatsBucketPipelineAggregationBuilder(name, bucketsPath); } - public static PercentilesBucketPipelineAggregationBuilder percentilesBucket(String name, - String bucketsPath) { + public static PercentilesBucketPipelineAggregationBuilder percentilesBucket(String name, String 
bucketsPath) { return new PercentilesBucketPipelineAggregationBuilder(name, bucketsPath); } @@ -87,23 +85,19 @@ public static MovAvgPipelineAggregationBuilder movingAvg(String name, String buc return new MovAvgPipelineAggregationBuilder(name, bucketsPath); } - public static BucketScriptPipelineAggregationBuilder bucketScript(String name, - Map bucketsPathsMap, Script script) { + public static BucketScriptPipelineAggregationBuilder bucketScript(String name, Map bucketsPathsMap, Script script) { return new BucketScriptPipelineAggregationBuilder(name, bucketsPathsMap, script); } - public static BucketScriptPipelineAggregationBuilder bucketScript(String name, Script script, - String... bucketsPaths) { + public static BucketScriptPipelineAggregationBuilder bucketScript(String name, Script script, String... bucketsPaths) { return new BucketScriptPipelineAggregationBuilder(name, script, bucketsPaths); } - public static BucketSelectorPipelineAggregationBuilder bucketSelector(String name, - Map bucketsPathsMap, Script script) { + public static BucketSelectorPipelineAggregationBuilder bucketSelector(String name, Map bucketsPathsMap, Script script) { return new BucketSelectorPipelineAggregationBuilder(name, bucketsPathsMap, script); } - public static BucketSelectorPipelineAggregationBuilder bucketSelector(String name, Script script, - String... bucketsPaths) { + public static BucketSelectorPipelineAggregationBuilder bucketSelector(String name, Script script, String... 
bucketsPaths) { return new BucketSelectorPipelineAggregationBuilder(name, script, bucketsPaths); } @@ -111,8 +105,7 @@ public static BucketSortPipelineAggregationBuilder bucketSort(String name, List< return new BucketSortPipelineAggregationBuilder(name, sorts); } - public static CumulativeSumPipelineAggregationBuilder cumulativeSum(String name, - String bucketsPath) { + public static CumulativeSumPipelineAggregationBuilder cumulativeSum(String name, String bucketsPath) { return new CumulativeSumPipelineAggregationBuilder(name, bucketsPath); } @@ -120,8 +113,7 @@ public static SerialDiffPipelineAggregationBuilder diff(String name, String buck return new SerialDiffPipelineAggregationBuilder(name, bucketsPath); } - public static MovFnPipelineAggregationBuilder movingFunction(String name, Script script, - String bucketsPaths, int window) { + public static MovFnPipelineAggregationBuilder movingFunction(String name, Script script, String bucketsPaths, int window) { return new MovFnPipelineAggregationBuilder(name, bucketsPaths, script, window); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java index d612014e0177f..01777292613f9 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.significant; +import org.apache.logging.log4j.LogManager; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.index.Term; @@ -31,7 +32,6 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.lease.Releasable; import 
org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.index.FilterableTermsEnum; import org.elasticsearch.common.lucene.index.FreqTermsEnum; import org.elasticsearch.index.mapper.MappedFieldType; @@ -60,7 +60,8 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFactory implements Releasable { - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(SignificantTermsAggregatorFactory.class)); + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger( + LogManager.getLogger(SignificantTermsAggregatorFactory.class)); private final IncludeExclude includeExclude; private final String executionHint; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java index cc2719e5b9678..1b5eaee639e1b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java @@ -19,10 +19,10 @@ package org.elasticsearch.search.aggregations.bucket.terms; +import org.apache.logging.log4j.LogManager; import org.apache.lucene.search.IndexSearcher; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.Aggregator; @@ -47,7 +47,7 @@ import java.util.Map; public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory { - private static final DeprecationLogger DEPRECATION_LOGGER = new 
DeprecationLogger(Loggers.getLogger(TermsAggregatorFactory.class)); + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(TermsAggregatorFactory.class)); static Boolean REMAP_GLOBAL_ORDS, COLLECT_SEGMENT_ORDS; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregator.java index 345b21d03887e..da936a76ee1ca 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregator.java @@ -80,7 +80,6 @@ public void collect(int doc, long bucket) throws IOException { leafMapScript.setDocument(doc); leafMapScript.execute(); - CollectionUtils.ensureNoSelfReferences(aggState, "Scripted metric aggs map script"); } }; } @@ -103,4 +102,10 @@ public InternalAggregation buildEmptyAggregation() { return new InternalScriptedMetric(name, null, reduceScript, pipelineAggregators(), metaData()); } + @Override + protected void doPostCollection() throws IOException { + CollectionUtils.ensureNoSelfReferences(aggState, "Scripted metric aggs map script"); + + super.doPostCollection(); + } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/avg/AvgBucketPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketPipelineAggregationBuilder.java similarity index 88% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/avg/AvgBucketPipelineAggregationBuilder.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketPipelineAggregationBuilder.java index d9aa2ae0ebc4b..10b99bb48fb04 100644 --- 
a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/avg/AvgBucketPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketPipelineAggregationBuilder.java @@ -17,14 +17,11 @@ * under the License. */ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.avg; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.BucketMetricsParser; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.BucketMetricsPipelineAggregationBuilder; import java.io.IOException; import java.util.Map; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/avg/AvgBucketPipelineAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketPipelineAggregator.java similarity index 83% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/avg/AvgBucketPipelineAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketPipelineAggregator.java index 776862a48d51c..4bb85abd54094 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/avg/AvgBucketPipelineAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketPipelineAggregator.java @@ -17,15 +17,12 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.avg; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.BucketMetricsPipelineAggregator; import java.io.IOException; import java.util.List; @@ -35,7 +32,7 @@ public class AvgBucketPipelineAggregator extends BucketMetricsPipelineAggregator private int count = 0; private double sum = 0; - protected AvgBucketPipelineAggregator(String name, String[] bucketsPaths, GapPolicy gapPolicy, DocValueFormat format, + AvgBucketPipelineAggregator(String name, String[] bucketsPaths, GapPolicy gapPolicy, DocValueFormat format, Map metaData) { super(name, bucketsPaths, gapPolicy, format, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricValue.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricValue.java similarity index 93% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricValue.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricValue.java index be22679a4e1bf..b6269e8161dbf 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricValue.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricValue.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricsParser.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricsParser.java similarity index 97% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricsParser.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricsParser.java index 3f29a3bfdc034..0e348563235a9 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricsParser.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricsParser.java @@ -17,13 +17,12 @@ * under the License. */ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; import java.util.ArrayList; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricsPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricsPipelineAggregationBuilder.java similarity index 96% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricsPipelineAggregationBuilder.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricsPipelineAggregationBuilder.java index 
c77922eff2a5e..27da9dea53099 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricsPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricsPipelineAggregationBuilder.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -27,9 +27,7 @@ import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.MultiBucketAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; import java.util.Collection; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricsPipelineAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricsPipelineAggregator.java similarity index 94% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricsPipelineAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricsPipelineAggregator.java index 981b21346ade9..0acd5137520c9 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricsPipelineAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricsPipelineAggregator.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -47,7 +47,7 @@ public abstract class BucketMetricsPipelineAggregator extends SiblingPipelineAgg protected final DocValueFormat format; protected final GapPolicy gapPolicy; - protected BucketMetricsPipelineAggregator(String name, String[] bucketsPaths, GapPolicy gapPolicy, DocValueFormat format, + BucketMetricsPipelineAggregator(String name, String[] bucketsPaths, GapPolicy gapPolicy, DocValueFormat format, Map metaData) { super(name, bucketsPaths, metaData); this.gapPolicy = gapPolicy; @@ -57,7 +57,7 @@ protected BucketMetricsPipelineAggregator(String name, String[] bucketsPaths, Ga /** * Read from a stream. */ - protected BucketMetricsPipelineAggregator(StreamInput in) throws IOException { + BucketMetricsPipelineAggregator(StreamInput in) throws IOException { super(in); format = in.readNamedWriteable(DocValueFormat.class); gapPolicy = GapPolicy.readFrom(in); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptPipelineAggregationBuilder.java similarity index 97% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregationBuilder.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptPipelineAggregationBuilder.java index a63fd005f9cb2..db56779559a40 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptPipelineAggregationBuilder.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.bucketscript; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; @@ -26,9 +26,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.script.Script; import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; import java.util.ArrayList; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptPipelineAggregator.java similarity index 93% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptPipelineAggregator.java index 042a30695c61d..bd9371815faae 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptPipelineAggregator.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.bucketscript; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -29,8 +29,6 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; import java.util.ArrayList; @@ -48,9 +46,9 @@ public class BucketScriptPipelineAggregator extends PipelineAggregator { private final Script script; private final Map bucketsPathsMap; - public BucketScriptPipelineAggregator(String name, Map bucketsPathsMap, Script script, DocValueFormat formatter, + BucketScriptPipelineAggregator(String name, Map bucketsPathsMap, Script script, DocValueFormat formatter, GapPolicy gapPolicy, Map metadata) { - super(name, bucketsPathsMap.values().toArray(new String[bucketsPathsMap.size()]), metadata); + super(name, bucketsPathsMap.values().toArray(new String[0]), metadata); this.bucketsPathsMap = bucketsPathsMap; this.script = script; this.formatter = formatter; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketselector/BucketSelectorPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketSelectorPipelineAggregationBuilder.java similarity index 97% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketselector/BucketSelectorPipelineAggregationBuilder.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketSelectorPipelineAggregationBuilder.java index cb8ba81cee6ed..f0497932b21c5 100644 --- 
a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketselector/BucketSelectorPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketSelectorPipelineAggregationBuilder.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.pipeline.bucketselector; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; @@ -25,9 +25,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.script.Script; -import org.elasticsearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; import java.util.ArrayList; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketselector/BucketSelectorPipelineAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketSelectorPipelineAggregator.java similarity index 93% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketselector/BucketSelectorPipelineAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketSelectorPipelineAggregator.java index 06beab04aa605..a17e710c75456 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketselector/BucketSelectorPipelineAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketSelectorPipelineAggregator.java @@ -17,8 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.bucketselector; - +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -28,7 +27,6 @@ import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; import java.util.ArrayList; @@ -40,14 +38,12 @@ public class BucketSelectorPipelineAggregator extends PipelineAggregator { private GapPolicy gapPolicy; - private Script script; - private Map bucketsPathsMap; - public BucketSelectorPipelineAggregator(String name, Map bucketsPathsMap, Script script, GapPolicy gapPolicy, + BucketSelectorPipelineAggregator(String name, Map bucketsPathsMap, Script script, GapPolicy gapPolicy, Map metadata) { - super(name, bucketsPathsMap.values().toArray(new String[bucketsPathsMap.size()]), metadata); + super(name, bucketsPathsMap.values().toArray(new String[0]), metadata); this.bucketsPathsMap = bucketsPathsMap; this.script = script; this.gapPolicy = gapPolicy; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketsort/BucketSortPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketSortPipelineAggregationBuilder.java similarity index 97% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketsort/BucketSortPipelineAggregationBuilder.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketSortPipelineAggregationBuilder.java index 15c37061cd9ee..0ce4c08720649 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketsort/BucketSortPipelineAggregationBuilder.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketSortPipelineAggregationBuilder.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.search.aggregations.pipeline.bucketsort; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; @@ -28,9 +28,7 @@ import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.SortBuilder; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketsort/BucketSortPipelineAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketSortPipelineAggregator.java similarity index 94% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketsort/BucketSortPipelineAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketSortPipelineAggregator.java index e10d5c35800fe..e98fdec992722 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketsort/BucketSortPipelineAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketSortPipelineAggregator.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.bucketsort; +package org.elasticsearch.search.aggregations.pipeline; import org.apache.lucene.util.PriorityQueue; @@ -26,9 +26,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; -import org.elasticsearch.search.aggregations.pipeline.BucketHelpers; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.SortOrder; @@ -47,9 +45,9 @@ public class BucketSortPipelineAggregator extends PipelineAggregator { private final Integer size; private final GapPolicy gapPolicy; - public BucketSortPipelineAggregator(String name, List sorts, int from, Integer size, GapPolicy gapPolicy, + BucketSortPipelineAggregator(String name, List sorts, int from, Integer size, GapPolicy gapPolicy, Map metadata) { - super(name, sorts.stream().map(s -> s.getFieldName()).toArray(String[]::new), metadata); + super(name, sorts.stream().map(FieldSortBuilder::getFieldName).toArray(String[]::new), metadata); this.sorts = sorts; this.from = from; this.size = size; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/cumulativesum/CumulativeSumPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumPipelineAggregationBuilder.java similarity index 96% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/cumulativesum/CumulativeSumPipelineAggregationBuilder.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumPipelineAggregationBuilder.java index 209af3c03a7fc..6230fff0b0d70 100644 --- 
a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/cumulativesum/CumulativeSumPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumPipelineAggregationBuilder.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.pipeline.cumulativesum; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; @@ -30,9 +30,6 @@ import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregatorFactory; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregatorFactory; -import org.elasticsearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.BucketMetricsParser; import java.io.IOException; import java.util.ArrayList; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/cumulativesum/CumulativeSumPipelineAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumPipelineAggregator.java similarity index 91% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/cumulativesum/CumulativeSumPipelineAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumPipelineAggregator.java index e144113245265..a70144b421a48 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/cumulativesum/CumulativeSumPipelineAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumPipelineAggregator.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.cumulativesum; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -29,8 +29,6 @@ import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramFactory; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; import java.util.ArrayList; @@ -44,8 +42,8 @@ public class CumulativeSumPipelineAggregator extends PipelineAggregator { private final DocValueFormat formatter; - public CumulativeSumPipelineAggregator(String name, String[] bucketsPaths, DocValueFormat formatter, - Map metadata) { + CumulativeSumPipelineAggregator(String name, String[] bucketsPaths, DocValueFormat formatter, + Map metadata) { super(name, bucketsPaths, metadata); this.formatter = formatter; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/Derivative.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/Derivative.java similarity index 88% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/Derivative.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/Derivative.java index 56f12c0876b22..3e2723dd55863 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/Derivative.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/Derivative.java @@ -17,16 +17,14 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.derivative; - -import org.elasticsearch.search.aggregations.pipeline.SimpleValue; +package org.elasticsearch.search.aggregations.pipeline; public interface Derivative extends SimpleValue { /** * Returns the normalized value. If no normalised factor has been specified * this method will return {@link #value()} - * + * * @return the normalized value */ double normalizedValue(); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/DerivativePipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/DerivativePipelineAggregationBuilder.java similarity index 98% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/DerivativePipelineAggregationBuilder.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/DerivativePipelineAggregationBuilder.java index 5fac90b094816..a8ee1293dc93a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/DerivativePipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/DerivativePipelineAggregationBuilder.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.derivative; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; @@ -35,9 +35,7 @@ import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregatorFactory; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregatorFactory; -import org.elasticsearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.joda.time.DateTimeZone; import java.io.IOException; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/DerivativePipelineAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/DerivativePipelineAggregator.java similarity index 93% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/DerivativePipelineAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/DerivativePipelineAggregator.java index 3fe60f23cf31d..303691d1ceb5e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/DerivativePipelineAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/DerivativePipelineAggregator.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.derivative; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -29,7 +29,6 @@ import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramFactory; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; import java.util.ArrayList; @@ -45,8 +44,8 @@ public class DerivativePipelineAggregator extends PipelineAggregator { private final GapPolicy gapPolicy; private final Double xAxisUnits; - public DerivativePipelineAggregator(String name, String[] bucketsPaths, DocValueFormat formatter, GapPolicy gapPolicy, Long xAxisUnits, - Map metadata) { + DerivativePipelineAggregator(String name, String[] bucketsPaths, DocValueFormat formatter, GapPolicy gapPolicy, Long xAxisUnits, + Map metadata) { super(name, bucketsPaths, metadata); this.formatter = formatter; this.gapPolicy = gapPolicy; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/EwmaModel.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/EwmaModel.java similarity index 94% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/EwmaModel.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/EwmaModel.java index 027536854ccfb..ad2532c3b5049 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/EwmaModel.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/EwmaModel.java @@ -17,14 +17,12 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.movavg.models; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.search.aggregations.pipeline.movavg.MovAvgPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.movfn.MovingFunctions; import java.io.IOException; import java.text.ParseException; @@ -39,7 +37,7 @@ public class EwmaModel extends MovAvgModel { public static final String NAME = "ewma"; - public static final double DEFAULT_ALPHA = 0.3; + private static final double DEFAULT_ALPHA = 0.3; /** * Controls smoothing of data. Also known as "level" value. diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/ExtendedStatsBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucket.java similarity index 91% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/ExtendedStatsBucket.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucket.java index 9e3c7cf88f670..a92fcff9d614f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/ExtendedStatsBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucket.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.extended; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.search.aggregations.metrics.ExtendedStats; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/ExtendedStatsBucketParser.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketParser.java similarity index 92% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/ExtendedStatsBucketParser.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketParser.java index d5e9e4f11e5ab..b602f18dba3df 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/ExtendedStatsBucketParser.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketParser.java @@ -17,11 +17,10 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.extended; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.BucketMetricsParser; import java.io.IOException; import java.util.Map; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/ExtendedStatsBucketPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketPipelineAggregationBuilder.java similarity index 93% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/ExtendedStatsBucketPipelineAggregationBuilder.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketPipelineAggregationBuilder.java index 84dcb03fbe9a7..10347e40354a8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/ExtendedStatsBucketPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketPipelineAggregationBuilder.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.extended; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -25,8 +25,6 @@ import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.BucketMetricsPipelineAggregationBuilder; import java.io.IOException; import java.util.Collection; @@ -113,4 +111,4 @@ protected boolean innerEquals(BucketMetricsPipelineAggregationBuilder metaData) { super(name, bucketsPaths, gapPolicy, formatter, metaData); this.sigma = sigma; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/HoltLinearModel.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/HoltLinearModel.java similarity index 95% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/HoltLinearModel.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/HoltLinearModel.java index d029bde29ad95..ec40d2b18b56a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/HoltLinearModel.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/HoltLinearModel.java @@ -17,14 +17,12 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.movavg.models; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.search.aggregations.pipeline.movavg.MovAvgPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.movfn.MovingFunctions; import java.io.IOException; import java.text.ParseException; @@ -38,8 +36,8 @@ public class HoltLinearModel extends MovAvgModel { public static final String NAME = "holt"; - public static final double DEFAULT_ALPHA = 0.3; - public static final double DEFAULT_BETA = 0.1; + private static final double DEFAULT_ALPHA = 0.3; + private static final double DEFAULT_BETA = 0.1; /** * Controls smoothing of data. Also known as "level" value. diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/HoltWintersModel.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/HoltWintersModel.java similarity index 96% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/HoltWintersModel.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/HoltWintersModel.java index d61cb5c41873b..df42689a2116e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/HoltWintersModel.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/HoltWintersModel.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.movavg.models; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.ElasticsearchParseException; @@ -27,8 +27,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.search.aggregations.pipeline.movavg.MovAvgPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.movfn.MovingFunctions; import java.io.IOException; import java.text.ParseException; @@ -44,12 +42,12 @@ public class HoltWintersModel extends MovAvgModel { public static final String NAME = "holt_winters"; - public static final double DEFAULT_ALPHA = 0.3; - public static final double DEFAULT_BETA = 0.1; - public static final double DEFAULT_GAMMA = 0.3; - public static final int DEFAULT_PERIOD = 1; - public static final SeasonalityType DEFAULT_SEASONALITY_TYPE = SeasonalityType.ADDITIVE; - public static final boolean DEFAULT_PAD = false; + private static final double DEFAULT_ALPHA = 0.3; + private static final double DEFAULT_BETA = 0.1; + private static final double DEFAULT_GAMMA = 0.3; + private static final int DEFAULT_PERIOD = 1; + private static final SeasonalityType DEFAULT_SEASONALITY_TYPE = SeasonalityType.ADDITIVE; + private static final boolean DEFAULT_PAD = false; public enum SeasonalityType { ADDITIVE((byte) 0, "add"), MULTIPLICATIVE((byte) 1, "mult"); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/InternalBucketMetricValue.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalBucketMetricValue.java similarity index 96% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/InternalBucketMetricValue.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalBucketMetricValue.java 
index a7ef024028f60..8a3de634dbfd4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/InternalBucketMetricValue.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalBucketMetricValue.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; @@ -26,7 +26,6 @@ import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; import java.util.Arrays; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/InternalDerivative.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalDerivative.java similarity index 91% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/InternalDerivative.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalDerivative.java index 5f6c57930169d..b8d5245846aeb 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/InternalDerivative.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalDerivative.java @@ -17,14 +17,12 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.derivative; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; import java.util.List; @@ -34,7 +32,7 @@ public class InternalDerivative extends InternalSimpleValue implements Derivative { private final double normalizationFactor; - public InternalDerivative(String name, double value, double normalizationFactor, DocValueFormat formatter, + InternalDerivative(String name, double value, double normalizationFactor, DocValueFormat formatter, List pipelineAggregators, Map metaData) { super(name, value, formatter, pipelineAggregators, metaData); this.normalizationFactor = normalizationFactor; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/InternalExtendedStatsBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalExtendedStatsBucket.java similarity index 93% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/InternalExtendedStatsBucket.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalExtendedStatsBucket.java index c7f2943bfcfcf..b0b78eb012042 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/InternalExtendedStatsBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalExtendedStatsBucket.java @@ -17,13 +17,12 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.extended; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.metrics.InternalExtendedStats; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; import java.util.List; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/InternalPercentilesBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalPercentilesBucket.java similarity index 96% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/InternalPercentilesBucket.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalPercentilesBucket.java index 97b43e2606907..940511619b15f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/InternalPercentilesBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalPercentilesBucket.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.percentile; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -27,7 +27,6 @@ import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; import org.elasticsearch.search.aggregations.metrics.InternalMax; import org.elasticsearch.search.aggregations.metrics.Percentile; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; import java.util.Arrays; @@ -42,7 +41,7 @@ public class InternalPercentilesBucket extends InternalNumericMetricsAggregation private double[] percents; private final transient Map percentileLookups = new HashMap<>(); - public InternalPercentilesBucket(String name, double[] percents, double[] percentiles, + InternalPercentilesBucket(String name, double[] percents, double[] percentiles, DocValueFormat formatter, List pipelineAggregators, Map metaData) { super(name, pipelineAggregators, metaData); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalSimpleValue.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalSimpleValue.java index 2eac04a9581be..2aac262a0076b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalSimpleValue.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalSimpleValue.java @@ -35,7 +35,7 @@ public class InternalSimpleValue extends InternalNumericMetricsAggregation.Singl public static final String NAME = "simple_value"; protected final double value; - public InternalSimpleValue(String name, double value, DocValueFormat formatter, List pipelineAggregators, + InternalSimpleValue(String name, double value, DocValueFormat formatter, List pipelineAggregators, Map metaData) { super(name, pipelineAggregators, metaData); this.format = 
formatter; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/InternalStatsBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalStatsBucket.java similarity index 92% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/InternalStatsBucket.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalStatsBucket.java index 352402fff827f..51d3cfc060f73 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/InternalStatsBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalStatsBucket.java @@ -17,13 +17,12 @@ * under the License. */ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.metrics.InternalStats; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; import java.util.List; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/LinearModel.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/LinearModel.java similarity index 93% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/LinearModel.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/LinearModel.java index 3859405218286..310403fca83d9 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/LinearModel.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/LinearModel.java @@ -17,15 +17,13 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.movavg.models; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.search.aggregations.pipeline.movavg.MovAvgPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.movfn.MovingFunctions; import java.io.IOException; import java.text.ParseException; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/max/MaxBucketPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketPipelineAggregationBuilder.java similarity index 88% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/max/MaxBucketPipelineAggregationBuilder.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketPipelineAggregationBuilder.java index fc2e1cd3e23b5..852a3e378d090 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/max/MaxBucketPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketPipelineAggregationBuilder.java @@ -17,14 +17,11 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.max; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.BucketMetricsParser; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.BucketMetricsPipelineAggregationBuilder; import java.io.IOException; import java.util.Map; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/max/MaxBucketPipelineAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketPipelineAggregator.java similarity index 84% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/max/MaxBucketPipelineAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketPipelineAggregator.java index d17a592c34911..046afc5a87959 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/max/MaxBucketPipelineAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketPipelineAggregator.java @@ -17,15 +17,12 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.max; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.BucketMetricsPipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.InternalBucketMetricValue; import java.io.IOException; import java.util.ArrayList; @@ -37,7 +34,7 @@ public class MaxBucketPipelineAggregator extends BucketMetricsPipelineAggregator private List maxBucketKeys; private double maxValue; - protected MaxBucketPipelineAggregator(String name, String[] bucketsPaths, GapPolicy gapPolicy, DocValueFormat formatter, + MaxBucketPipelineAggregator(String name, String[] bucketsPaths, GapPolicy gapPolicy, DocValueFormat formatter, Map metaData) { super(name, bucketsPaths, gapPolicy, formatter, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/min/MinBucketPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MinBucketPipelineAggregationBuilder.java similarity index 88% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/min/MinBucketPipelineAggregationBuilder.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MinBucketPipelineAggregationBuilder.java index 75cf756441bef..b44ee869e2c4b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/min/MinBucketPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MinBucketPipelineAggregationBuilder.java @@ 
-17,14 +17,11 @@ * under the License. */ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.min; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.BucketMetricsParser; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.BucketMetricsPipelineAggregationBuilder; import java.io.IOException; import java.util.Map; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/min/MinBucketPipelineAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MinBucketPipelineAggregator.java similarity index 81% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/min/MinBucketPipelineAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MinBucketPipelineAggregator.java index 88595d782616b..d9d0e2a0c3a97 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/min/MinBucketPipelineAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MinBucketPipelineAggregator.java @@ -17,15 +17,12 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.min; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.BucketMetricsPipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.InternalBucketMetricValue; import java.io.IOException; import java.util.ArrayList; @@ -37,7 +34,7 @@ public class MinBucketPipelineAggregator extends BucketMetricsPipelineAggregator private List minBucketKeys; private double minValue; - protected MinBucketPipelineAggregator(String name, String[] bucketsPaths, GapPolicy gapPolicy, DocValueFormat formatter, + MinBucketPipelineAggregator(String name, String[] bucketsPaths, GapPolicy gapPolicy, DocValueFormat formatter, Map metaData) { super(name, bucketsPaths, gapPolicy, formatter, metaData); } @@ -73,7 +70,7 @@ protected void collectBucketValue(String bucketKey, Double bucketValue) { @Override protected InternalAggregation buildAggregation(List pipelineAggregators, Map metadata) { - String[] keys = minBucketKeys.toArray(new String[minBucketKeys.size()]); + String[] keys = minBucketKeys.toArray(new String[0]); return new InternalBucketMetricValue(name(), keys, minValue, format, Collections.emptyList(), metaData()); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/MovAvgModel.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgModel.java similarity index 99% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/MovAvgModel.java rename to 
server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgModel.java index f826c01adced1..7c47d2eadf1a6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/MovAvgModel.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgModel.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.pipeline.movavg.models; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.NamedWriteable; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/MovAvgModelBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgModelBuilder.java similarity index 93% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/MovAvgModelBuilder.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgModelBuilder.java index 0c74ead985e32..1cb13dd200c7d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/MovAvgModelBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgModelBuilder.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.movavg.models; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.xcontent.ToXContentFragment; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/MovAvgPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgPipelineAggregationBuilder.java similarity index 96% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/MovAvgPipelineAggregationBuilder.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgPipelineAggregationBuilder.java index 1f36d5395b2da..c88cc9ec8feb5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/MovAvgPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgPipelineAggregationBuilder.java @@ -17,14 +17,14 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.movavg; +package org.elasticsearch.search.aggregations.pipeline; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.ParseFieldRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -34,12 +34,7 @@ import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregatorFactory; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregatorFactory; -import org.elasticsearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.movavg.models.MovAvgModel; -import org.elasticsearch.search.aggregations.pipeline.movavg.models.MovAvgModelBuilder; -import org.elasticsearch.search.aggregations.pipeline.movavg.models.SimpleModel; import java.io.IOException; import java.text.ParseException; @@ -56,13 +51,13 @@ public class MovAvgPipelineAggregationBuilder extends AbstractPipelineAggregationBuilder { public static final String NAME = "moving_avg"; - public static final ParseField MODEL = new ParseField("model"); + static final ParseField MODEL = new ParseField("model"); private static final ParseField WINDOW = new ParseField("window"); public static final ParseField SETTINGS = new ParseField("settings"); private static final ParseField PREDICT = new ParseField("predict"); private static 
final ParseField MINIMIZE = new ParseField("minimize"); private static final DeprecationLogger DEPRECATION_LOGGER - = new DeprecationLogger(Loggers.getLogger(MovAvgPipelineAggregationBuilder.class)); + = new DeprecationLogger(LogManager.getLogger(MovAvgPipelineAggregationBuilder.class)); private String format; private GapPolicy gapPolicy = GapPolicy.SKIP; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/MovAvgPipelineAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgPipelineAggregator.java similarity index 93% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/MovAvgPipelineAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgPipelineAggregator.java index 196f7cca4737f..10d1cdc5a71cd 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/MovAvgPipelineAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgPipelineAggregator.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.movavg; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.collect.EvictingQueue; import org.elasticsearch.common.io.stream.StreamInput; @@ -31,9 +31,6 @@ import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramFactory; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.movavg.models.MovAvgModel; import java.io.IOException; import java.util.ArrayList; @@ -53,8 +50,8 @@ public class MovAvgPipelineAggregator extends PipelineAggregator { private final int predict; private final boolean minimize; - public MovAvgPipelineAggregator(String name, String[] bucketsPaths, DocValueFormat formatter, GapPolicy gapPolicy, - int window, int predict, MovAvgModel model, boolean minimize, Map metadata) { + MovAvgPipelineAggregator(String name, String[] bucketsPaths, DocValueFormat formatter, GapPolicy gapPolicy, + int window, int predict, MovAvgModel model, boolean minimize, Map metadata) { super(name, bucketsPaths, metadata); this.formatter = formatter; this.gapPolicy = gapPolicy; @@ -126,9 +123,9 @@ public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext if (model.hasValue(values.size())) { double movavg = model.next(values); - List aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false).map((p) -> { - return (InternalAggregation) p; - }).collect(Collectors.toList()); + List aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false) + .map((p) -> (InternalAggregation) p) + .collect(Collectors.toList()); aggs.add(new InternalSimpleValue(name(), movavg, formatter, new ArrayList(), metaData())); 
newBucket = factory.createBucket(factory.getKey(bucket), bucket.getDocCount(), new InternalAggregations(aggs)); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movfn/MovFnPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovFnPipelineAggregationBuilder.java similarity index 97% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movfn/MovFnPipelineAggregationBuilder.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovFnPipelineAggregationBuilder.java index 375125dbefc55..321e7b3aa6f3f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movfn/MovFnPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovFnPipelineAggregationBuilder.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.pipeline.movfn; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; @@ -34,9 +34,7 @@ import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregatorFactory; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregatorFactory; -import org.elasticsearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; import java.util.Collection; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movfn/MovFnPipelineAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovFnPipelineAggregator.java similarity index 95% rename from 
server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movfn/MovFnPipelineAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovFnPipelineAggregator.java index fc0ba7afac065..4f14df2d66d12 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movfn/MovFnPipelineAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovFnPipelineAggregator.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.pipeline.movfn; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.collect.EvictingQueue; import org.elasticsearch.common.io.stream.StreamInput; @@ -29,9 +29,6 @@ import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramFactory; -import org.elasticsearch.search.aggregations.pipeline.BucketHelpers; -import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; import java.util.ArrayList; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movfn/MovingFunctionScript.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovingFunctionScript.java similarity index 96% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movfn/MovingFunctionScript.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovingFunctionScript.java index 2f6751b73cc91..79e1f740729ce 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movfn/MovingFunctionScript.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovingFunctionScript.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.movfn; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.script.ScriptContext; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movfn/MovingFunctions.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovingFunctions.java similarity index 99% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movfn/MovingFunctions.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovingFunctions.java index 020189d461935..6ad1bf714b20f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movfn/MovingFunctions.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovingFunctions.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.pipeline.movfn; +package org.elasticsearch.search.aggregations.pipeline; import java.util.Arrays; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/ParsedBucketMetricValue.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ParsedBucketMetricValue.java similarity index 97% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/ParsedBucketMetricValue.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ParsedBucketMetricValue.java index 69e99352636b6..8e8515cb3d74b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/ParsedBucketMetricValue.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ParsedBucketMetricValue.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -70,4 +70,4 @@ public static ParsedBucketMetricValue fromXContent(XContentParser parser, final bucketMetricValue.setName(name); return bucketMetricValue; } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/ParsedDerivative.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ParsedDerivative.java similarity index 95% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/ParsedDerivative.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ParsedDerivative.java index 2b871a99d9a6a..c017b6d60e32e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/ParsedDerivative.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ParsedDerivative.java @@ -17,14 +17,13 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.derivative; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.aggregations.pipeline.ParsedSimpleValue; import java.io.IOException; @@ -76,4 +75,4 @@ protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) } return builder; } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/ParsedExtendedStatsBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ParsedExtendedStatsBucket.java similarity index 95% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/ParsedExtendedStatsBucket.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ParsedExtendedStatsBucket.java index caa014c9b4944..8823152cd59aa 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/ParsedExtendedStatsBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ParsedExtendedStatsBucket.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.extended; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentParser; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/ParsedPercentilesBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ParsedPercentilesBucket.java similarity index 97% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/ParsedPercentilesBucket.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ParsedPercentilesBucket.java index c635ff82735b3..360ed9de214b0 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/ParsedPercentilesBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ParsedPercentilesBucket.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.percentile; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/ParsedStatsBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ParsedStatsBucket.java similarity index 95% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/ParsedStatsBucket.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ParsedStatsBucket.java index 84ec05f4eef9f..bacdc5f4a8a27 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/ParsedStatsBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ParsedStatsBucket.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentParser; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/PercentilesBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucket.java similarity index 91% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/PercentilesBucket.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucket.java index 0dfe9d24582f5..5e0efb98a0ea7 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/PercentilesBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucket.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.percentile; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.search.aggregations.metrics.Percentiles; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/PercentilesBucketPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java similarity index 93% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/PercentilesBucketPipelineAggregationBuilder.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java index 56dd0d3e786fc..49e065cdeef98 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/PercentilesBucketPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.percentile; +package org.elasticsearch.search.aggregations.pipeline; import com.carrotsearch.hppc.DoubleArrayList; @@ -29,9 +29,6 @@ import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.BucketMetricsParser; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.BucketMetricsPipelineAggregationBuilder; import java.io.IOException; import java.util.Arrays; @@ -42,8 +39,7 @@ public class PercentilesBucketPipelineAggregationBuilder extends BucketMetricsPipelineAggregationBuilder { public static final String NAME = "percentiles_bucket"; - - public static final ParseField PERCENTS_FIELD = new ParseField("percents"); + static final ParseField PERCENTS_FIELD = new ParseField("percents"); private double[] percents = new double[] { 1.0, 5.0, 25.0, 50.0, 75.0, 95.0, 99.0 }; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/PercentilesBucketPipelineAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregator.java similarity index 86% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/PercentilesBucketPipelineAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregator.java index 7f51a99d79867..20c38ca05bd75 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/PercentilesBucketPipelineAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregator.java @@ 
-17,16 +17,13 @@ * under the License. */ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.percentile; +package org.elasticsearch.search.aggregations.pipeline; -import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.BucketMetricsPipelineAggregator; import java.io.IOException; import java.util.ArrayList; @@ -35,12 +32,11 @@ import java.util.Map; public class PercentilesBucketPipelineAggregator extends BucketMetricsPipelineAggregator { - public final ParseField PERCENTS_FIELD = new ParseField("percents"); private final double[] percents; private List data; - protected PercentilesBucketPipelineAggregator(String name, double[] percents, String[] bucketsPaths, GapPolicy gapPolicy, + PercentilesBucketPipelineAggregator(String name, double[] percents, String[] bucketsPaths, GapPolicy gapPolicy, DocValueFormat formatter, Map metaData) { super(name, bucketsPaths, gapPolicy, formatter, metaData); this.percents = percents; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffPipelineAggregationBuilder.java similarity index 96% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffPipelineAggregationBuilder.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffPipelineAggregationBuilder.java index a7e43c401e8a6..c2963f610ac7c 100644 --- 
a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffPipelineAggregationBuilder.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.pipeline.serialdiff; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; @@ -26,9 +26,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; import java.util.ArrayList; @@ -41,7 +39,6 @@ public class SerialDiffPipelineAggregationBuilder extends AbstractPipelineAggregationBuilder { public static final String NAME = "serial_diff"; - public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME); private static final ParseField GAP_POLICY = new ParseField("gap_policy"); private static final ParseField LAG = new ParseField("lag"); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffPipelineAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffPipelineAggregator.java similarity index 92% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffPipelineAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffPipelineAggregator.java index d438104be7fe4..cb5d5e583ce84 100644 --- 
a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffPipelineAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffPipelineAggregator.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.pipeline.serialdiff; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.EvictingQueue; @@ -31,8 +31,6 @@ import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramFactory; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; import java.util.ArrayList; @@ -48,8 +46,8 @@ public class SerialDiffPipelineAggregator extends PipelineAggregator { private GapPolicy gapPolicy; private int lag; - public SerialDiffPipelineAggregator(String name, String[] bucketsPaths, @Nullable DocValueFormat formatter, GapPolicy gapPolicy, - int lag, Map metadata) { + SerialDiffPipelineAggregator(String name, String[] bucketsPaths, @Nullable DocValueFormat formatter, GapPolicy gapPolicy, + int lag, Map metadata) { super(name, bucketsPaths, metadata); this.formatter = formatter; this.gapPolicy = gapPolicy; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SiblingPipelineAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SiblingPipelineAggregator.java index b78691455d59e..c04bd9fa0dfa4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SiblingPipelineAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SiblingPipelineAggregator.java @@ -36,14 +36,14 @@ import 
java.util.stream.StreamSupport; public abstract class SiblingPipelineAggregator extends PipelineAggregator { - protected SiblingPipelineAggregator(String name, String[] bucketsPaths, Map metaData) { + SiblingPipelineAggregator(String name, String[] bucketsPaths, Map metaData) { super(name, bucketsPaths, metaData); } /** * Read from a stream. */ - protected SiblingPipelineAggregator(StreamInput in) throws IOException { + SiblingPipelineAggregator(StreamInput in) throws IOException { super(in); } @@ -55,15 +55,15 @@ public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext InternalMultiBucketAggregation multiBucketsAgg = (InternalMultiBucketAggregation) aggregation; List buckets = multiBucketsAgg.getBuckets(); List newBuckets = new ArrayList<>(); - for (int i = 0; i < buckets.size(); i++) { - InternalMultiBucketAggregation.InternalBucket bucket = (InternalMultiBucketAggregation.InternalBucket) buckets.get(i); + for (Bucket bucket1 : buckets) { + InternalMultiBucketAggregation.InternalBucket bucket = (InternalMultiBucketAggregation.InternalBucket) bucket1; InternalAggregation aggToAdd = doReduce(bucket.getAggregations(), reduceContext); - List aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false).map((p) -> { - return (InternalAggregation) p; - }).collect(Collectors.toList()); + List aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false) + .map((p) -> (InternalAggregation) p) + .collect(Collectors.toList()); aggs.add(aggToAdd); InternalMultiBucketAggregation.InternalBucket newBucket = multiBucketsAgg.createBucket(new InternalAggregations(aggs), - bucket); + bucket); newBuckets.add(newBucket); } @@ -71,9 +71,9 @@ public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext } else if (aggregation instanceof InternalSingleBucketAggregation) { InternalSingleBucketAggregation singleBucketAgg = (InternalSingleBucketAggregation) aggregation; InternalAggregation aggToAdd = 
doReduce(singleBucketAgg.getAggregations(), reduceContext); - List aggs = StreamSupport.stream(singleBucketAgg.getAggregations().spliterator(), false).map((p) -> { - return (InternalAggregation) p; - }).collect(Collectors.toList()); + List aggs = StreamSupport.stream(singleBucketAgg.getAggregations().spliterator(), false) + .map((p) -> (InternalAggregation) p) + .collect(Collectors.toList()); aggs.add(aggToAdd); return singleBucketAgg.create(new InternalAggregations(aggs)); } else { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/SimpleModel.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SimpleModel.java similarity index 93% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/SimpleModel.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SimpleModel.java index b54dba242f9f9..a64131278a563 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/SimpleModel.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SimpleModel.java @@ -17,14 +17,12 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.movavg.models; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.search.aggregations.pipeline.movavg.MovAvgPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.movfn.MovingFunctions; import java.io.IOException; import java.text.ParseException; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/SimulatedAnealingMinimizer.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SimulatedAnealingMinimizer.java similarity index 97% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/SimulatedAnealingMinimizer.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SimulatedAnealingMinimizer.java index 711ee2299cffc..e157b2cec1c6f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/SimulatedAnealingMinimizer.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SimulatedAnealingMinimizer.java @@ -17,10 +17,9 @@ * under the License. */ -package org.elasticsearch.search.aggregations.pipeline.movavg; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.collect.EvictingQueue; -import org.elasticsearch.search.aggregations.pipeline.movavg.models.MovAvgModel; /** * A cost minimizer which will fit a MovAvgModel to the data. 
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/StatsBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/StatsBucket.java similarity index 92% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/StatsBucket.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/StatsBucket.java index c29a27b8446ab..dd05d8328a09d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/StatsBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/StatsBucket.java @@ -1,4 +1,4 @@ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats; +package org.elasticsearch.search.aggregations.pipeline; /* * Licensed to Elasticsearch under one or more contributor diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/StatsBucketPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketPipelineAggregationBuilder.java similarity index 88% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/StatsBucketPipelineAggregationBuilder.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketPipelineAggregationBuilder.java index c472f1a3487e0..f943f3318fc84 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/StatsBucketPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketPipelineAggregationBuilder.java @@ -17,14 +17,11 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.BucketMetricsParser; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.BucketMetricsPipelineAggregationBuilder; import java.io.IOException; import java.util.Map; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/StatsBucketPipelineAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketPipelineAggregator.java similarity index 86% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/StatsBucketPipelineAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketPipelineAggregator.java index 4b1febf444858..6ba9a2bfed3fc 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/StatsBucketPipelineAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketPipelineAggregator.java @@ -17,14 +17,12 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.BucketMetricsPipelineAggregator; import java.io.IOException; import java.util.List; @@ -36,7 +34,7 @@ public class StatsBucketPipelineAggregator extends BucketMetricsPipelineAggregat private double min = Double.POSITIVE_INFINITY; private double max = Double.NEGATIVE_INFINITY; - protected StatsBucketPipelineAggregator(String name, String[] bucketsPaths, GapPolicy gapPolicy, DocValueFormat formatter, + StatsBucketPipelineAggregator(String name, String[] bucketsPaths, GapPolicy gapPolicy, DocValueFormat formatter, Map metaData) { super(name, bucketsPaths, gapPolicy, formatter, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/sum/SumBucketPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SumBucketPipelineAggregationBuilder.java similarity index 88% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/sum/SumBucketPipelineAggregationBuilder.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SumBucketPipelineAggregationBuilder.java index e415f3adc409a..920f7e9b0ac26 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/sum/SumBucketPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SumBucketPipelineAggregationBuilder.java @@ -17,14 +17,11 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.sum; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.BucketMetricsParser; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.BucketMetricsPipelineAggregationBuilder; import java.io.IOException; import java.util.Map; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/sum/SumBucketPipelineAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SumBucketPipelineAggregator.java similarity index 82% rename from server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/sum/SumBucketPipelineAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SumBucketPipelineAggregator.java index 7efbf401d04da..1f25651bc8c9b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/sum/SumBucketPipelineAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SumBucketPipelineAggregator.java @@ -17,15 +17,12 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.sum; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.BucketMetricsPipelineAggregator; import java.io.IOException; import java.util.List; @@ -34,7 +31,7 @@ public class SumBucketPipelineAggregator extends BucketMetricsPipelineAggregator { private double sum = 0; - protected SumBucketPipelineAggregator(String name, String[] bucketsPaths, GapPolicy gapPolicy, DocValueFormat formatter, + SumBucketPipelineAggregator(String name, String[] bucketsPaths, GapPolicy gapPolicy, DocValueFormat formatter, Map metaData) { super(name, bucketsPaths, gapPolicy, formatter, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 92ae481a830dd..60767bbe3719b 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.builder; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; @@ -29,7 +30,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import 
org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.ToXContentObject; @@ -78,7 +78,7 @@ */ public final class SearchSourceBuilder implements Writeable, ToXContentObject, Rewriteable { private static final DeprecationLogger DEPRECATION_LOGGER = - new DeprecationLogger(Loggers.getLogger(SearchSourceBuilder.class)); + new DeprecationLogger(LogManager.getLogger(SearchSourceBuilder.class)); public static final ParseField FROM_FIELD = new ParseField("from"); public static final ParseField SIZE_FIELD = new ParseField("size"); diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java index 398bc847b3374..0819dfd74dfaf 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java @@ -18,12 +18,12 @@ */ package org.elasticsearch.search.fetch.subphase; +import org.apache.logging.log4j.LogManager; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.index.SortedNumericDocValues; import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.index.fielddata.AtomicFieldData; import org.elasticsearch.index.fielddata.AtomicNumericFieldData; import org.elasticsearch.index.fielddata.IndexFieldData; @@ -55,7 +55,8 @@ */ public final class DocValueFieldsFetchSubPhase implements FetchSubPhase { - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(DocValueFieldsFetchSubPhase.class)); + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger( + 
LogManager.getLogger(DocValueFieldsFetchSubPhase.class)); @Override public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java b/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java index 7e6945b9d4822..9b11de93bee8a 100644 --- a/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.slice; +import org.apache.logging.log4j.LogManager; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; @@ -33,7 +34,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; @@ -65,7 +65,7 @@ */ public class SliceBuilder implements Writeable, ToXContentObject { - private static final DeprecationLogger DEPRECATION_LOG = new DeprecationLogger(Loggers.getLogger(SliceBuilder.class)); + private static final DeprecationLogger DEPRECATION_LOG = new DeprecationLogger(LogManager.getLogger(SliceBuilder.class)); public static final ParseField FIELD_FIELD = new ParseField("field"); public static final ParseField ID_FIELD = new ParseField("id"); diff --git a/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java index 19a62d7444376..c4e33fa091bd3 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java @@ -19,13 +19,13 @@ package 
org.elasticsearch.search.sort; +import org.apache.logging.log4j.LogManager; import org.apache.lucene.search.SortField; import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -50,7 +50,7 @@ * A sort builder to sort based on a document field. */ public class FieldSortBuilder extends SortBuilder { - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(FieldSortBuilder.class)); + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(FieldSortBuilder.class)); public static final String NAME = "field_sort"; public static final ParseField MISSING = new ParseField("missing"); diff --git a/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java index 2c8c4e234dbb8..07af9ffb10c69 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.sort; +import org.apache.logging.log4j.LogManager; import org.apache.lucene.document.LatLonDocValuesField; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; @@ -36,7 +37,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import 
org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -72,7 +72,7 @@ * A geo distance based sorting on a geo point like field. */ public class GeoDistanceSortBuilder extends SortBuilder { - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(GeoDistanceSortBuilder.class)); + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(GeoDistanceSortBuilder.class)); public static final String NAME = "_geo_distance"; public static final String ALTERNATIVE_NAME = "_geoDistance"; diff --git a/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java index 95478e083243a..427d262ba9bb7 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.sort; +import org.apache.logging.log4j.LogManager; import org.apache.lucene.index.BinaryDocValues; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Scorable; @@ -31,7 +32,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -66,7 +66,7 @@ * Script sort builder allows to sort based on a custom script expression. 
*/ public class ScriptSortBuilder extends SortBuilder { - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(ScriptSortBuilder.class)); + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(ScriptSortBuilder.class)); public static final String NAME = "_script"; public static final ParseField TYPE_FIELD = new ParseField("type"); diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 87ea8cb978fe4..791b59a1d5bb0 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -270,7 +270,7 @@ public ClusterState execute(ClusterState currentState) { // Index doesn't exist - create it and start recovery // Make sure that the index we are about to create has a validate name MetaDataCreateIndexService.validateIndexName(renamedIndexName, currentState); - createIndexService.validateIndexSettings(renamedIndexName, snapshotIndexMetaData.getSettings(), false); + createIndexService.validateIndexSettings(renamedIndexName, snapshotIndexMetaData.getSettings(), currentState, false); IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData).state(IndexMetaData.State.OPEN).index(renamedIndexName); indexMdBuilder.settings(Settings.builder().put(snapshotIndexMetaData.getSettings()).put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())); if (!request.includeAliases() && !snapshotIndexMetaData.getAliases().isEmpty()) { diff --git a/server/src/main/java/org/elasticsearch/tasks/LoggingTaskListener.java b/server/src/main/java/org/elasticsearch/tasks/LoggingTaskListener.java index 79424541810c4..43ddc83ba151a 100644 --- a/server/src/main/java/org/elasticsearch/tasks/LoggingTaskListener.java +++ 
b/server/src/main/java/org/elasticsearch/tasks/LoggingTaskListener.java @@ -19,16 +19,16 @@ package org.elasticsearch.tasks; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.common.logging.Loggers; /** * A TaskListener that just logs the response at the info level. Used when we * need a listener but aren't returning the result to the user. */ public final class LoggingTaskListener implements TaskListener { - private static final Logger logger = Loggers.getLogger(LoggingTaskListener.class); + private static final Logger logger = LogManager.getLogger(LoggingTaskListener.class); /** * Get the instance of NoopActionListener cast appropriately. diff --git a/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java b/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java index 4e4d369330c80..5f2635fac88d9 100644 --- a/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java +++ b/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java @@ -67,7 +67,7 @@ public class ConnectionManager implements Closeable { private final DelegatingNodeConnectionListener connectionListener = new DelegatingNodeConnectionListener(); public ConnectionManager(Settings settings, Transport transport, ThreadPool threadPool) { - this(settings, transport, threadPool, buildDefaultConnectionProfile(settings)); + this(settings, transport, threadPool, ConnectionProfile.buildDefaultConnectionProfile(settings)); } public ConnectionManager(Settings settings, Transport transport, ThreadPool threadPool, ConnectionProfile defaultProfile) { @@ -323,23 +323,4 @@ public void onConnectionClosed(Transport.Connection connection) { } } } - - public static ConnectionProfile buildDefaultConnectionProfile(Settings settings) { - int connectionsPerNodeRecovery = TransportService.CONNECTIONS_PER_NODE_RECOVERY.get(settings); - int 
connectionsPerNodeBulk = TransportService.CONNECTIONS_PER_NODE_BULK.get(settings); - int connectionsPerNodeReg = TransportService.CONNECTIONS_PER_NODE_REG.get(settings); - int connectionsPerNodeState = TransportService.CONNECTIONS_PER_NODE_STATE.get(settings); - int connectionsPerNodePing = TransportService.CONNECTIONS_PER_NODE_PING.get(settings); - ConnectionProfile.Builder builder = new ConnectionProfile.Builder(); - builder.setConnectTimeout(TransportService.TCP_CONNECT_TIMEOUT.get(settings)); - builder.setHandshakeTimeout(TransportService.TCP_CONNECT_TIMEOUT.get(settings)); - builder.addConnections(connectionsPerNodeBulk, TransportRequestOptions.Type.BULK); - builder.addConnections(connectionsPerNodePing, TransportRequestOptions.Type.PING); - // if we are not master eligible we don't need a dedicated channel to publish the state - builder.addConnections(DiscoveryNode.isMasterNode(settings) ? connectionsPerNodeState : 0, TransportRequestOptions.Type.STATE); - // if we are not a data-node we don't need any dedicated channels for recovery - builder.addConnections(DiscoveryNode.isDataNode(settings) ? 
connectionsPerNodeRecovery : 0, TransportRequestOptions.Type.RECOVERY); - builder.addConnections(connectionsPerNodeReg, TransportRequestOptions.Type.REG); - return builder.build(); - } } diff --git a/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java b/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java index b9ed42ca00a56..d6183655fa2cb 100644 --- a/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java +++ b/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java @@ -18,7 +18,9 @@ */ package org.elasticsearch.transport; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import java.util.ArrayList; @@ -91,6 +93,31 @@ public static ConnectionProfile resolveConnectionProfile(@Nullable ConnectionPro } } + /** + * Builds a default connection profile based on the provided settings. 
+ * + * @param settings to build the connection profile from + * @return the connection profile + */ + public static ConnectionProfile buildDefaultConnectionProfile(Settings settings) { + int connectionsPerNodeRecovery = TransportService.CONNECTIONS_PER_NODE_RECOVERY.get(settings); + int connectionsPerNodeBulk = TransportService.CONNECTIONS_PER_NODE_BULK.get(settings); + int connectionsPerNodeReg = TransportService.CONNECTIONS_PER_NODE_REG.get(settings); + int connectionsPerNodeState = TransportService.CONNECTIONS_PER_NODE_STATE.get(settings); + int connectionsPerNodePing = TransportService.CONNECTIONS_PER_NODE_PING.get(settings); + Builder builder = new Builder(); + builder.setConnectTimeout(TransportService.TCP_CONNECT_TIMEOUT.get(settings)); + builder.setHandshakeTimeout(TransportService.TCP_CONNECT_TIMEOUT.get(settings)); + builder.addConnections(connectionsPerNodeBulk, TransportRequestOptions.Type.BULK); + builder.addConnections(connectionsPerNodePing, TransportRequestOptions.Type.PING); + // if we are not master eligible we don't need a dedicated channel to publish the state + builder.addConnections(DiscoveryNode.isMasterNode(settings) ? connectionsPerNodeState : 0, TransportRequestOptions.Type.STATE); + // if we are not a data-node we don't need any dedicated channels for recovery + builder.addConnections(DiscoveryNode.isDataNode(settings) ? 
connectionsPerNodeRecovery : 0, TransportRequestOptions.Type.RECOVERY); + builder.addConnections(connectionsPerNodeReg, TransportRequestOptions.Type.REG); + return builder.build(); + } + /** * A builder to build a new {@link ConnectionProfile} */ diff --git a/server/src/main/java/org/elasticsearch/watcher/FileWatcher.java b/server/src/main/java/org/elasticsearch/watcher/FileWatcher.java index 0b0504f460908..e75e73626c1c0 100644 --- a/server/src/main/java/org/elasticsearch/watcher/FileWatcher.java +++ b/server/src/main/java/org/elasticsearch/watcher/FileWatcher.java @@ -18,9 +18,9 @@ */ package org.elasticsearch.watcher; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.logging.Loggers; import java.io.IOException; import java.nio.file.Files; @@ -38,7 +38,7 @@ public class FileWatcher extends AbstractResourceWatcher { private FileObserver rootFileObserver; private Path file; - private static final Logger logger = Loggers.getLogger(FileWatcher.class); + private static final Logger logger = LogManager.getLogger(FileWatcher.class); /** * Creates new file watcher on the given directory diff --git a/server/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java b/server/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java index 6e4c97fd3dad2..c014845ce093d 100644 --- a/server/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java +++ b/server/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java @@ -114,7 +114,7 @@ public void testGuessRootCause() { ElasticsearchException[] rootCauses = exception.guessRootCauses(); assertEquals(rootCauses.length, 1); assertEquals(ElasticsearchException.getExceptionName(rootCauses[0]), "index_not_found_exception"); - assertEquals(rootCauses[0].getMessage(), "no such index"); + assertEquals("no such index [foo]", rootCauses[0].getMessage()); ShardSearchFailure failure = new 
ShardSearchFailure(new ParsingException(1, 2, "foobar", null), new SearchShardTarget("node_1", new Index("foo", "_na_"), 1, null)); ShardSearchFailure failure1 = new ShardSearchFailure(new ParsingException(1, 2, "foobar", null), diff --git a/server/src/test/java/org/elasticsearch/VersionTests.java b/server/src/test/java/org/elasticsearch/VersionTests.java index c0d29e86fd60b..e1a7633e1d0dc 100644 --- a/server/src/test/java/org/elasticsearch/VersionTests.java +++ b/server/src/test/java/org/elasticsearch/VersionTests.java @@ -181,7 +181,7 @@ public void testMinCompatVersion() { // from 7.0 on we are supporting the latest minor of the previous major... this might fail once we add a new version ie. 5.x is // released since we need to bump the supported minor in Version#minimumCompatibilityVersion() - Version lastVersion = Version.V_6_5_0; // TODO: remove this once min compat version is a constant instead of method + Version lastVersion = Version.V_6_6_0; // TODO: remove this once min compat version is a constant instead of method assertEquals(lastVersion.major, Version.V_7_0_0_alpha1.minimumCompatibilityVersion().major); assertEquals("did you miss to bump the minor in Version#minimumCompatibilityVersion()", lastVersion.minor, Version.V_7_0_0_alpha1.minimumCompatibilityVersion().minor); @@ -340,7 +340,8 @@ public static void assertUnknownVersion(Version version) { public void testIsCompatible() { assertTrue(isCompatible(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion())); - assertTrue(isCompatible(Version.V_6_5_0, Version.V_7_0_0_alpha1)); + assertFalse(isCompatible(Version.V_6_5_0, Version.V_7_0_0_alpha1)); + assertTrue(isCompatible(Version.V_6_6_0, Version.V_7_0_0_alpha1)); assertFalse(isCompatible(Version.fromId(2000099), Version.V_7_0_0_alpha1)); assertFalse(isCompatible(Version.fromId(2000099), Version.V_6_5_0)); assertFalse(isCompatible(Version.fromString("7.0.0"), Version.fromString("8.0.0"))); diff --git 
a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java index ba4aa3015f308..48914fca13133 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java @@ -73,7 +73,7 @@ public void testSimpleUnknownIndex() { client().admin().indices().prepareGetIndex().addIndices("missing_idx").get(); fail("Expected IndexNotFoundException"); } catch (IndexNotFoundException e) { - assertThat(e.getMessage(), is("no such index")); + assertThat(e.getMessage(), is("no such index [missing_idx]")); } } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsIT.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsIT.java index ca1630b00641c..e82be77fc147c 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsIT.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsIT.java @@ -45,7 +45,7 @@ public void testBulkProcessorAutoCreateRestrictions() throws Exception { assertEquals(3, responses.length); assertFalse("Operation on existing index should succeed", responses[0].isFailed()); assertTrue("Missing index should have been flagged", responses[1].isFailed()); - assertEquals("[wontwork] IndexNotFoundException[no such index]", responses[1].getFailureMessage()); + assertEquals("[wontwork] IndexNotFoundException[no such index [wontwork]]", responses[1].getFailureMessage()); assertFalse("Operation on existing index should succeed", responses[2].isFailed()); } } diff --git a/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java b/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java index b0ac2ed5fa0d3..2a155d2e3adbb 100644 --- 
a/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java +++ b/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java @@ -18,10 +18,10 @@ */ package org.elasticsearch.action.search; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchTransportRequest; @@ -40,7 +40,7 @@ * SearchPhaseContext for tests */ public final class MockSearchPhaseContext implements SearchPhaseContext { - private static final Logger logger = Loggers.getLogger(MockSearchPhaseContext.class); + private static final Logger logger = LogManager.getLogger(MockSearchPhaseContext.class); public AtomicReference phaseFailure = new AtomicReference<>(); final int numShards; final AtomicInteger numSuccess; diff --git a/server/src/test/java/org/elasticsearch/action/support/AutoCreateIndexTests.java b/server/src/test/java/org/elasticsearch/action/support/AutoCreateIndexTests.java index 84dfe5ec93e9f..4918939c90b86 100644 --- a/server/src/test/java/org/elasticsearch/action/support/AutoCreateIndexTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/AutoCreateIndexTests.java @@ -83,9 +83,10 @@ public void testHandleSpaces() { // see #21449 public void testAutoCreationDisabled() { Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), false).build(); AutoCreateIndex autoCreateIndex = newAutoCreateIndex(settings); + String randomIndex = randomAlphaOfLengthBetween(1, 10); IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> - autoCreateIndex.shouldAutoCreate(randomAlphaOfLengthBetween(1, 10), buildClusterState())); - assertEquals("no such index and 
[action.auto_create_index] is [false]", e.getMessage()); + autoCreateIndex.shouldAutoCreate(randomIndex, buildClusterState())); + assertEquals("no such index [" + randomIndex + "] and [action.auto_create_index] is [false]", e.getMessage()); } public void testAutoCreationEnabled() { @@ -207,14 +208,15 @@ private AutoCreateIndex newAutoCreateIndex(Settings settings) { private void expectNotMatch(ClusterState clusterState, AutoCreateIndex autoCreateIndex, String index) { IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> autoCreateIndex.shouldAutoCreate(index, clusterState)); - assertEquals("no such index and [action.auto_create_index] ([" + autoCreateIndex.getAutoCreate() + "]) doesn't match", - e.getMessage()); + assertEquals( + "no such index [" + index + "] and [action.auto_create_index] ([" + autoCreateIndex.getAutoCreate() + "]) doesn't match", + e.getMessage()); } private void expectForbidden(ClusterState clusterState, AutoCreateIndex autoCreateIndex, String index, String forbiddingPattern) { IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> autoCreateIndex.shouldAutoCreate(index, clusterState)); - assertEquals("no such index and [action.auto_create_index] contains [" + forbiddingPattern + assertEquals("no such index [" + index + "] and [action.auto_create_index] contains [" + forbiddingPattern + "] which forbids automatic creation of the index", e.getMessage()); } } diff --git a/server/src/test/java/org/elasticsearch/action/termvectors/MultiTermVectorsIT.java b/server/src/test/java/org/elasticsearch/action/termvectors/MultiTermVectorsIT.java index 10a4c9f3e1d7a..2f75f6df1a88e 100644 --- a/server/src/test/java/org/elasticsearch/action/termvectors/MultiTermVectorsIT.java +++ b/server/src/test/java/org/elasticsearch/action/termvectors/MultiTermVectorsIT.java @@ -76,7 +76,7 @@ public void testMissingIndexThrowsMissingIndex() throws Exception { MultiTermVectorsResponse response = mtvBuilder.execute().actionGet(); 
assertThat(response.getResponses().length, equalTo(1)); assertThat(response.getResponses()[0].getFailure().getCause(), instanceOf(IndexNotFoundException.class)); - assertThat(response.getResponses()[0].getFailure().getCause().getMessage(), equalTo("no such index")); + assertThat(response.getResponses()[0].getFailure().getCause().getMessage(), equalTo("no such index [testX]")); } public void testMultiTermVectorsWithVersion() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java b/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java index 606e716d210b2..3e27b784e0a10 100644 --- a/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java @@ -271,7 +271,7 @@ public void testIndicesOptionsOnAllowNoIndicesFalse() throws Exception { client().admin().cluster().prepareState().clear().setMetaData(true).setIndices("a*").setIndicesOptions(allowNoIndices).get(); fail("Expected IndexNotFoundException"); } catch (IndexNotFoundException e) { - assertThat(e.getMessage(), is("no such index")); + assertThat(e.getMessage(), is("no such index [a*]")); } } @@ -282,7 +282,7 @@ public void testIndicesIgnoreUnavailableFalse() throws Exception { client().admin().cluster().prepareState().clear().setMetaData(true).setIndices("fzzbzz").setIndicesOptions(allowNoIndices).get(); fail("Expected IndexNotFoundException"); } catch (IndexNotFoundException e) { - assertThat(e.getMessage(), is("no such index")); + assertThat(e.getMessage(), is("no such index [fzzbzz]")); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java b/server/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java index b814716cb4717..fbb0fa732f601 100644 --- a/server/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java +++ 
b/server/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java @@ -20,6 +20,8 @@ package org.elasticsearch.cluster.allocation; import com.carrotsearch.hppc.ObjectIntHashMap; + +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.cluster.ClusterState; @@ -28,7 +30,6 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.test.ESIntegTestCase; @@ -43,7 +44,7 @@ @ClusterScope(scope= ESIntegTestCase.Scope.TEST, numDataNodes =0, minNumDataNodes = 2) public class AwarenessAllocationIT extends ESIntegTestCase { - private final Logger logger = Loggers.getLogger(AwarenessAllocationIT.class); + private final Logger logger = LogManager.getLogger(AwarenessAllocationIT.class); @Override protected int numberOfReplicas() { diff --git a/server/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java b/server/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java index 7735fe4b241cc..e7bcce2817c0b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java @@ -20,6 +20,7 @@ package org.elasticsearch.cluster.allocation; import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; @@ -74,7 +75,7 @@ @ClusterScope(scope = Scope.TEST, numDataNodes = 0) public class ClusterRerouteIT extends 
ESIntegTestCase { - private final Logger logger = Loggers.getLogger(ClusterRerouteIT.class); + private final Logger logger = LogManager.getLogger(ClusterRerouteIT.class); public void testRerouteWithCommands_disableAllocationSettings() throws Exception { Settings commonSettings = Settings.builder() @@ -334,7 +335,7 @@ public void testMessageLogging() throws Exception{ .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)) .execute().actionGet(); - Logger actionLogger = Loggers.getLogger(TransportClusterRerouteAction.class); + Logger actionLogger = LogManager.getLogger(TransportClusterRerouteAction.class); MockLogAppender dryRunMockLog = new MockLogAppender(); dryRunMockLog.start(); diff --git a/server/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java b/server/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java index ccdc1d6ab3323..c3d1a6040a8f5 100644 --- a/server/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.allocation; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.health.ClusterHealthStatus; @@ -29,7 +30,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; @@ -46,7 +46,7 @@ @ClusterScope(scope= Scope.TEST, numDataNodes =0) public class FilteringAllocationIT extends ESIntegTestCase { - private final Logger logger = 
Loggers.getLogger(FilteringAllocationIT.class); + private final Logger logger = LogManager.getLogger(FilteringAllocationIT.class); public void testDecommissionNodeNoReplicas() throws Exception { logger.info("--> starting 2 nodes"); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java index 9ad9603b1489b..0832df7c896d9 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java @@ -530,7 +530,7 @@ public void testConcreteIndicesIgnoreIndicesOneMissingIndex() { IndexNotFoundException infe = expectThrows(IndexNotFoundException.class, () -> indexNameExpressionResolver.concreteIndexNames(context, "testZZZ")); - assertThat(infe.getMessage(), is("no such index")); + assertThat(infe.getMessage(), is("no such index [testZZZ]")); } public void testConcreteIndicesIgnoreIndicesOneMissingIndexOtherFound() { @@ -552,7 +552,7 @@ public void testConcreteIndicesIgnoreIndicesAllMissing() { IndexNotFoundException infe = expectThrows(IndexNotFoundException.class, () -> indexNameExpressionResolver.concreteIndexNames(context, "testMo", "testMahdy")); - assertThat(infe.getMessage(), is("no such index")); + assertThat(infe.getMessage(), is("no such index [testMo]")); } public void testConcreteIndicesIgnoreIndicesEmptyRequest() { @@ -1161,7 +1161,7 @@ public void testDeleteIndexIgnoresAliases() { IndexNotFoundException infe = expectThrows(IndexNotFoundException.class, () -> indexNameExpressionResolver.concreteIndexNames(state, new DeleteIndexRequest("does_not_exist"))); assertEquals("does_not_exist", infe.getIndex().getName()); - assertEquals("no such index", infe.getMessage()); + assertEquals("no such index [does_not_exist]", infe.getMessage()); } { IllegalArgumentException iae = 
expectThrows(IllegalArgumentException.class, diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java index abb34f80eac0e..5ccacee395a31 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java @@ -34,7 +34,9 @@ import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; +import org.elasticsearch.cluster.shards.ClusterShardLimitIT; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -56,7 +58,11 @@ import java.util.stream.Stream; import static java.util.Collections.emptyMap; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED; +import static org.elasticsearch.cluster.shards.ClusterShardLimitIT.ShardCounts.forDataNodeCount; +import static org.elasticsearch.indices.IndicesServiceTests.createClusterForShardLimitTest; import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; @@ -118,9 +124,9 @@ public void testValidateShrinkIndex() { MetaDataCreateIndexService.validateShrinkIndex(state, "target", Collections.emptySet(), "source", Settings.EMPTY) ).getMessage()); - assertEquals("no such index", + assertEquals("no such index 
[no_such_index]", expectThrows(IndexNotFoundException.class, () -> - MetaDataCreateIndexService.validateShrinkIndex(state, "no such index", Collections.emptySet(), "target", Settings.EMPTY) + MetaDataCreateIndexService.validateShrinkIndex(state, "no_such_index", Collections.emptySet(), "target", Settings.EMPTY) ).getMessage()); Settings targetSettings = Settings.builder().put("index.number_of_shards", 1).build(); @@ -194,9 +200,9 @@ public void testValidateSplitIndex() { MetaDataCreateIndexService.validateSplitIndex(state, "target", Collections.emptySet(), "source", targetSettings) ).getMessage()); - assertEquals("no such index", + assertEquals("no such index [no_such_index]", expectThrows(IndexNotFoundException.class, () -> - MetaDataCreateIndexService.validateSplitIndex(state, "no such index", Collections.emptySet(), "target", targetSettings) + MetaDataCreateIndexService.validateSplitIndex(state, "no_such_index", Collections.emptySet(), "target", targetSettings) ).getMessage()); assertEquals("the number of source shards [10] must be less that the number of target shards [5]", @@ -466,4 +472,30 @@ public void testCalculateNumRoutingShards() { assertEquals("ratio is not a power of two", intRatio, Integer.highestOneBit(intRatio)); } } + + public void testShardLimitDeprecationWarning() { + int nodesInCluster = randomIntBetween(2,100); + ClusterShardLimitIT.ShardCounts counts = forDataNodeCount(nodesInCluster); + Settings clusterSettings = Settings.builder() + .put(MetaData.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), counts.getShardsPerNode()) + .build(); + ClusterState state = createClusterForShardLimitTest(nodesInCluster, counts.getFirstIndexShards(), counts.getFirstIndexReplicas(), + clusterSettings); + + Settings indexSettings = Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, counts.getFailingIndexShards()) + .put(SETTING_NUMBER_OF_REPLICAS, counts.getFailingIndexReplicas()) + .build(); + + DeprecationLogger 
deprecationLogger = new DeprecationLogger(logger); + MetaDataCreateIndexService.checkShardLimit(indexSettings, state, deprecationLogger); + int totalShards = counts.getFailingIndexShards() * (1 + counts.getFailingIndexReplicas()); + int currentShards = counts.getFirstIndexShards() * (1 + counts.getFirstIndexReplicas()); + int maxShards = counts.getShardsPerNode() * nodesInCluster; + assertWarnings("In a future major version, this request will fail because this action would add [" + + totalShards + "] total shards, but this cluster currently has [" + currentShards + "]/[" + maxShards + "] maximum shards open."+ + " Before upgrading, reduce the number of shards in your cluster or adjust the cluster setting [cluster.max_shards_per_node]."); + } + } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateServiceTests.java new file mode 100644 index 0000000000000..55e2216edb564 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateServiceTests.java @@ -0,0 +1,99 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.shards.ClusterShardLimitIT; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.test.ESTestCase; + +import java.util.Arrays; +import java.util.stream.Collectors; + +import static org.elasticsearch.cluster.shards.ClusterShardLimitIT.ShardCounts.forDataNodeCount; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class MetaDataIndexStateServiceTests extends ESTestCase { + + public void testValidateShardLimitDeprecationWarning() { + int nodesInCluster = randomIntBetween(2,100); + ClusterShardLimitIT.ShardCounts counts = forDataNodeCount(nodesInCluster); + Settings clusterSettings = Settings.builder() + .put(MetaData.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), counts.getShardsPerNode()) + .build(); + ClusterState state = createClusterForShardLimitTest(nodesInCluster, counts.getFirstIndexShards(), counts.getFirstIndexReplicas(), + counts.getFailingIndexShards(), counts.getFailingIndexReplicas(), clusterSettings); + + Index[] indices = Arrays.stream(state.metaData().indices().values().toArray(IndexMetaData.class)) + .map(IndexMetaData::getIndex) + .collect(Collectors.toList()) + .toArray(new Index[2]); + + DeprecationLogger deprecationLogger = new DeprecationLogger(logger); + MetaDataIndexStateService.validateShardLimit(state, indices, deprecationLogger); + int totalShards = counts.getFailingIndexShards() * (1 + counts.getFailingIndexReplicas()); + int currentShards = counts.getFirstIndexShards() * (1 + 
counts.getFirstIndexReplicas()); + int maxShards = counts.getShardsPerNode() * nodesInCluster; + assertWarnings("In a future major version, this request will fail because this action would add [" + + totalShards + "] total shards, but this cluster currently has [" + currentShards + "]/[" + maxShards + "] maximum shards open."+ + " Before upgrading, reduce the number of shards in your cluster or adjust the cluster setting [cluster.max_shards_per_node]."); + } + + public static ClusterState createClusterForShardLimitTest(int nodesInCluster, int openIndexShards, int openIndexReplicas, + int closedIndexShards, int closedIndexReplicas, Settings clusterSettings) { + ImmutableOpenMap.Builder dataNodes = ImmutableOpenMap.builder(); + for (int i = 0; i < nodesInCluster; i++) { + dataNodes.put(randomAlphaOfLengthBetween(5,15), mock(DiscoveryNode.class)); + } + DiscoveryNodes nodes = mock(DiscoveryNodes.class); + when(nodes.getDataNodes()).thenReturn(dataNodes.build()); + + IndexMetaData.Builder openIndexMetaData = IndexMetaData.builder(randomAlphaOfLengthBetween(5, 15)) + .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .creationDate(randomLong()) + .numberOfShards(openIndexShards) + .numberOfReplicas(openIndexReplicas); + IndexMetaData.Builder closedIndexMetaData = IndexMetaData.builder(randomAlphaOfLengthBetween(5, 15)) + .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .creationDate(randomLong()) + .state(IndexMetaData.State.CLOSE) + .numberOfShards(closedIndexShards) + .numberOfReplicas(closedIndexReplicas); + MetaData.Builder metaData = MetaData.builder().put(openIndexMetaData).put(closedIndexMetaData); + if (randomBoolean()) { + metaData.persistentSettings(clusterSettings); + } else { + metaData.transientSettings(clusterSettings); + } + + return ClusterState.builder(ClusterName.DEFAULT) + .metaData(metaData) + .nodes(nodes) + .build(); + } +} diff --git 
a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java index e658ff03a186d..dd9846a7b7526 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java @@ -20,6 +20,8 @@ package org.elasticsearch.cluster.routing.allocation; import com.carrotsearch.hppc.cursors.ObjectCursor; + +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; @@ -32,7 +34,6 @@ import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.hamcrest.Matcher; @@ -47,7 +48,7 @@ import static org.hamcrest.Matchers.not; public class AddIncrementallyTests extends ESAllocationTestCase { - private final Logger logger = Loggers.getLogger(AddIncrementallyTests.class); + private final Logger logger = LogManager.getLogger(AddIncrementallyTests.class); public void testAddNodesAndIndices() { Settings.Builder settings = Settings.builder(); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java index da0920e69373b..189dc4542b4b7 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java @@ -19,6 +19,7 @@ package 
org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterInfo; @@ -44,7 +45,6 @@ import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -70,7 +70,7 @@ import static org.hamcrest.Matchers.nullValue; public class AllocationCommandsTests extends ESAllocationTestCase { - private final Logger logger = Loggers.getLogger(AllocationCommandsTests.class); + private final Logger logger = LogManager.getLogger(AllocationCommandsTests.class); public void testMoveShardCommand() { AllocationService allocation = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); @@ -180,7 +180,7 @@ public void testAllocateCommand() { allocation.reroute(clusterState, new AllocationCommands(randomAllocateCommand("test2", 0, "node2")), false, false); fail("expected ShardNotFoundException when allocating non-existing index"); } catch (IndexNotFoundException e) { - assertThat(e.getMessage(), containsString("no such index")); + assertThat(e.getMessage(), containsString("no such index [test2]")); } logger.info("--> allocating empty primary with acceptDataLoss flag set to false"); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java index 2c1ec07c7fa9b..38a72adeb1b3a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java +++ 
b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; @@ -34,7 +35,6 @@ import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import java.util.HashMap; @@ -51,7 +51,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { - private final Logger logger = Loggers.getLogger(AwarenessAllocationTests.class); + private final Logger logger = LogManager.getLogger(AwarenessAllocationTests.class); public void testMoveShardOnceNewNodeWithAttributeAdded1() { AllocationService strategy = createAllocationService(Settings.builder() diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java index 5e400d95e4b02..006c6dff6eb0b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java @@ -20,6 +20,8 @@ package org.elasticsearch.cluster.routing.allocation; import com.carrotsearch.hppc.cursors.ObjectCursor; + +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.ArrayUtil; import org.elasticsearch.Version; @@ -38,7 +40,6 @@ import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import 
org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.gateway.GatewayAllocator; @@ -52,7 +53,7 @@ public class BalanceConfigurationTests extends ESAllocationTestCase { - private final Logger logger = Loggers.getLogger(BalanceConfigurationTests.class); + private final Logger logger = LogManager.getLogger(BalanceConfigurationTests.class); // TODO maybe we can randomize these numbers somehow final int numberOfNodes = 25; final int numberOfIndices = 12; diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java index 8cccdb08fb5e9..cb47426102dbb 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; @@ -31,7 +32,6 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.gateway.TestGatewayAllocator; @@ -46,7 +46,7 @@ import static org.hamcrest.Matchers.not; public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { - private final Logger 
logger = Loggers.getLogger(ClusterRebalanceRoutingTests.class); + private final Logger logger = LogManager.getLogger(ClusterRebalanceRoutingTests.class); public void testAlways() { AllocationService strategy = createAllocationService(Settings.builder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java index aa7be906a63dc..5c851467c047c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; @@ -28,7 +29,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; @@ -39,7 +39,7 @@ import static org.hamcrest.Matchers.nullValue; public class ConcurrentRebalanceRoutingTests extends ESAllocationTestCase { - private final Logger logger = Loggers.getLogger(ConcurrentRebalanceRoutingTests.class); + private final Logger logger = LogManager.getLogger(ConcurrentRebalanceRoutingTests.class); public void testClusterConcurrentRebalance() { AllocationService strategy = createAllocationService(Settings.builder() diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java 
b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java index 81c6685ca149d..01cb709568972 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; @@ -30,7 +31,6 @@ import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; @@ -40,7 +40,7 @@ import static org.hamcrest.Matchers.not; public class DeadNodesAllocationTests extends ESAllocationTestCase { - private final Logger logger = Loggers.getLogger(DeadNodesAllocationTests.class); + private final Logger logger = LogManager.getLogger(DeadNodesAllocationTests.class); public void testSimpleDeadNodeOnStartedPrimaryShard() { AllocationService allocation = createAllocationService(Settings.builder() diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java index 37e8d83592ff5..8c710d01a30ac 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java @@ -19,6 
+19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; @@ -29,7 +30,6 @@ import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; @@ -38,7 +38,7 @@ import static org.hamcrest.Matchers.not; public class ElectReplicaAsPrimaryDuringRelocationTests extends ESAllocationTestCase { - private final Logger logger = Loggers.getLogger(ElectReplicaAsPrimaryDuringRelocationTests.class); + private final Logger logger = LogManager.getLogger(ElectReplicaAsPrimaryDuringRelocationTests.class); public void testElectReplicaAsPrimaryDuringRelocation() { AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java index 8ebe627751ce4..106e95b677586 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterInfo; @@ -33,7 +34,6 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import 
org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; @@ -41,7 +41,7 @@ import static org.hamcrest.Matchers.not; public class ExpectedShardSizeAllocationTests extends ESAllocationTestCase { - private final Logger logger = Loggers.getLogger(ExpectedShardSizeAllocationTests.class); + private final Logger logger = LogManager.getLogger(ExpectedShardSizeAllocationTests.class); public void testInitializingHasExpectedSize() { final long byteSize = randomIntBetween(0, Integer.MAX_VALUE); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java index fbdcadc6ec32f..b84c65e81673d 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java @@ -21,6 +21,7 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest; @@ -38,7 +39,6 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.indices.cluster.ClusterStateChanges; @@ -63,7 +63,7 @@ import static org.hamcrest.Matchers.equalTo; public class 
FailedNodeRoutingTests extends ESAllocationTestCase { - private final Logger logger = Loggers.getLogger(FailedNodeRoutingTests.class); + private final Logger logger = LogManager.getLogger(FailedNodeRoutingTests.class); public void testSimpleFailedNodeTest() { AllocationService strategy = createAllocationService(Settings.builder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java index 787789d410ff9..05e77c4cf4ba1 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java @@ -20,6 +20,8 @@ package org.elasticsearch.cluster.routing.allocation; import com.carrotsearch.hppc.cursors.ObjectCursor; + +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; @@ -34,7 +36,6 @@ import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.VersionUtils; @@ -56,7 +57,7 @@ import static org.hamcrest.Matchers.nullValue; public class FailedShardsRoutingTests extends ESAllocationTestCase { - private final Logger logger = Loggers.getLogger(FailedShardsRoutingTests.class); + private final Logger logger = LogManager.getLogger(FailedShardsRoutingTests.class); public void 
testFailedShardPrimaryRelocatingToAndFrom() { AllocationService allocation = createAllocationService(Settings.builder() diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java index d6e54b6e3b331..6086482a442fc 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; @@ -29,7 +30,6 @@ import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; @@ -40,7 +40,7 @@ import static org.hamcrest.Matchers.nullValue; public class IndexBalanceTests extends ESAllocationTestCase { - private final Logger logger = Loggers.getLogger(IndexBalanceTests.class); + private final Logger logger = LogManager.getLogger(IndexBalanceTests.class); public void testBalanceAllNodesStarted() { AllocationService strategy = createAllocationService(Settings.builder() diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java index 711e7401ad217..89d19e03957a5 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java +++ 
b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; @@ -49,7 +50,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.NodeVersionAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider; import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.shard.ShardId; @@ -76,7 +76,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { - private final Logger logger = Loggers.getLogger(NodeVersionAllocationDeciderTests.class); + private final Logger logger = LogManager.getLogger(NodeVersionAllocationDeciderTests.class); public void testDoNotAllocateFromPrimary() { AllocationService strategy = createAllocationService(Settings.builder() diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java index 35a9be017d5d4..cdd868c158eeb 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; @@ -28,14 +29,13 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import 
org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.hamcrest.Matchers.equalTo; public class PreferPrimaryAllocationTests extends ESAllocationTestCase { - private final Logger logger = Loggers.getLogger(PreferPrimaryAllocationTests.class); + private final Logger logger = LogManager.getLogger(PreferPrimaryAllocationTests.class); public void testPreferPrimaryAllocationOverReplicas() { logger.info("create an allocation with 1 initial recoveries"); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java index a634d32d71d04..f306184c5764f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; @@ -28,7 +29,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; @@ -38,7 +38,7 @@ import static org.hamcrest.Matchers.nullValue; public class PrimaryElectionRoutingTests extends ESAllocationTestCase { - private final Logger logger = 
Loggers.getLogger(PrimaryElectionRoutingTests.class); + private final Logger logger = LogManager.getLogger(PrimaryElectionRoutingTests.class); public void testBackupElectionToPrimaryWhenPrimaryCanBeAllocatedToAnotherNode() { AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java index e5725ed61ef15..dcca97369e7f9 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; @@ -28,7 +29,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; @@ -36,7 +36,7 @@ import static org.hamcrest.Matchers.equalTo; public class PrimaryNotRelocatedWhileBeingRecoveredTests extends ESAllocationTestCase { - private final Logger logger = Loggers.getLogger(PrimaryNotRelocatedWhileBeingRecoveredTests.class); + private final Logger logger = LogManager.getLogger(PrimaryNotRelocatedWhileBeingRecoveredTests.class); public void testPrimaryNotRelocatedWhileBeingRecoveredFrom() { AllocationService strategy = createAllocationService(Settings.builder() diff 
--git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java index 1406e4d6d6121..565f9c919d0e2 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterInfo; @@ -32,7 +33,6 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; @@ -43,7 +43,7 @@ import static org.hamcrest.Matchers.nullValue; public class RebalanceAfterActiveTests extends ESAllocationTestCase { - private final Logger logger = Loggers.getLogger(RebalanceAfterActiveTests.class); + private final Logger logger = LogManager.getLogger(RebalanceAfterActiveTests.class); public void testRebalanceOnlyAfterAllShardsAreActive() { final long[] sizes = new long[5]; diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java index ab64d0131eca1..7a90f93516ab1 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java @@ -19,6 +19,7 @@ 
package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; @@ -27,7 +28,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -39,7 +39,7 @@ import static org.hamcrest.Matchers.nullValue; public class ReplicaAllocatedAfterPrimaryTests extends ESAllocationTestCase { - private final Logger logger = Loggers.getLogger(ReplicaAllocatedAfterPrimaryTests.class); + private final Logger logger = LogManager.getLogger(ReplicaAllocatedAfterPrimaryTests.class); public void testBackupIsAllocatedAfterPrimary() { AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java index 9401cc1ca6f45..bc88158356c63 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; @@ -31,7 +32,6 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; import 
org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; @@ -40,7 +40,7 @@ import static org.hamcrest.Matchers.not; public class RoutingNodesIntegrityTests extends ESAllocationTestCase { - private final Logger logger = Loggers.getLogger(IndexBalanceTests.class); + private final Logger logger = LogManager.getLogger(IndexBalanceTests.class); public void testBalanceAllNodesStarted() { AllocationService strategy = createAllocationService(Settings.builder() diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java index 4b74cee867138..f059125f1ea3d 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.action.support.replication.ClusterStateCreationUtils; @@ -39,7 +40,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; @@ -52,7 +52,7 @@ import static org.hamcrest.Matchers.equalTo; public class SameShardRoutingTests extends ESAllocationTestCase { - private final Logger logger = 
Loggers.getLogger(SameShardRoutingTests.class); + private final Logger logger = LogManager.getLogger(SameShardRoutingTests.class); public void testSameHost() { AllocationService strategy = createAllocationService( diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java index 534e2af5a8925..3cf53e60c4844 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; @@ -28,7 +29,6 @@ import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -38,7 +38,7 @@ import static org.hamcrest.Matchers.equalTo; public class ShardVersioningTests extends ESAllocationTestCase { - private final Logger logger = Loggers.getLogger(ShardVersioningTests.class); + private final Logger logger = LogManager.getLogger(ShardVersioningTests.class); public void testSimple() { AllocationService strategy = createAllocationService(Settings.builder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java index 
7530e34cb8383..314318fc29f0d 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; @@ -31,7 +32,6 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; @@ -41,7 +41,7 @@ import static org.hamcrest.Matchers.equalTo; public class ShardsLimitAllocationTests extends ESAllocationTestCase { - private final Logger logger = Loggers.getLogger(ShardsLimitAllocationTests.class); + private final Logger logger = LogManager.getLogger(ShardsLimitAllocationTests.class); public void testIndexLevelShardsLimitAllocate() { AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java index 25d29d0fca482..7bdad46d61c11 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.LogManager; 
import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; @@ -31,7 +32,6 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import java.util.HashSet; @@ -50,7 +50,7 @@ import static org.hamcrest.Matchers.nullValue; public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase { - private final Logger logger = Loggers.getLogger(SingleShardNoReplicasRoutingTests.class); + private final Logger logger = LogManager.getLogger(SingleShardNoReplicasRoutingTests.class); public void testSingleIndexStartedShard() { AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java index 44c8d5ac4d3e6..ac8f1af219b3f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; @@ -28,7 +29,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; -import org.elasticsearch.common.logging.Loggers; import 
org.elasticsearch.common.settings.Settings; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; @@ -39,7 +39,7 @@ import static org.hamcrest.Matchers.nullValue; public class SingleShardOneReplicaRoutingTests extends ESAllocationTestCase { - private final Logger logger = Loggers.getLogger(SingleShardOneReplicaRoutingTests.class); + private final Logger logger = LogManager.getLogger(SingleShardOneReplicaRoutingTests.class); public void testSingleIndexFirstStartPrimaryThenBackups() { AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java index 0239ee6235e2d..a7179bfba7870 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; @@ -29,7 +30,6 @@ import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; @@ -42,7 +42,7 @@ import static org.hamcrest.Matchers.nullValue; public class TenShardsOneReplicaRoutingTests extends ESAllocationTestCase { - private final Logger logger = 
Loggers.getLogger(TenShardsOneReplicaRoutingTests.class); + private final Logger logger = LogManager.getLogger(TenShardsOneReplicaRoutingTests.class); public void testSingleIndexFirstStartPrimaryThenBackups() { AllocationService strategy = createAllocationService(Settings.builder() diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java index 01586d9c49575..769399a804998 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java @@ -21,6 +21,8 @@ import com.carrotsearch.hppc.IntHashSet; import com.carrotsearch.hppc.cursors.ObjectCursor; + +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; @@ -41,7 +43,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; @@ -62,7 +63,7 @@ import static org.hamcrest.Matchers.equalTo; public class ThrottlingAllocationTests extends ESAllocationTestCase { - private final Logger logger = Loggers.getLogger(ThrottlingAllocationTests.class); + private final Logger logger = LogManager.getLogger(ThrottlingAllocationTests.class); public void testPrimaryRecoveryThrottling() { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java 
b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java index 3001a4ba9e423..44da514c0b5c5 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; @@ -28,7 +29,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; @@ -40,7 +40,7 @@ import static org.hamcrest.Matchers.nullValue; public class UpdateNumberOfReplicasTests extends ESAllocationTestCase { - private final Logger logger = Loggers.getLogger(UpdateNumberOfReplicasTests.class); + private final Logger logger = LogManager.getLogger(UpdateNumberOfReplicasTests.class); public void testUpdateNumberOfReplicas() { AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java index 5be51ceb3ae48..20bd5957aeb1d 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java @@ -20,6 +20,8 @@ package 
org.elasticsearch.cluster.routing.allocation.decider; import com.carrotsearch.randomizedtesting.generators.RandomPicks; + +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; @@ -32,7 +34,6 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.Allocation; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.Rebalance; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -49,7 +50,7 @@ public class EnableAllocationTests extends ESAllocationTestCase { - private final Logger logger = Loggers.getLogger(EnableAllocationTests.class); + private final Logger logger = LogManager.getLogger(EnableAllocationTests.class); public void testClusterEnableNone() { AllocationService strategy = createAllocationService(Settings.builder() diff --git a/server/src/test/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java b/server/src/test/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java new file mode 100644 index 0000000000000..f9958d3aba2cd --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java @@ -0,0 +1,140 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + +package org.elasticsearch.cluster.shards; + +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESIntegTestCase; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) +public class ClusterShardLimitIT extends ESIntegTestCase { + private static final String shardsPerNodeKey = MetaData.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(); + + public void testSettingClusterMaxShards() { + int shardsPerNode = between(1, 500_000); + setShardsPerNode(shardsPerNode); + } + + public void testMinimumPerNode() { + int negativeShardsPerNode = between(-50_000, 0); + try { + if (frequently()) { + client().admin().cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(shardsPerNodeKey, negativeShardsPerNode).build()) + .get(); + } else { + client().admin().cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(shardsPerNodeKey, negativeShardsPerNode).build()) + .get(); + } + fail("should not be able to set negative shards per node"); + } catch (IllegalArgumentException ex) { + assertEquals("Failed to parse value [" + negativeShardsPerNode + "] for setting [cluster.max_shards_per_node] must be >= 1", + ex.getMessage()); + } + } + + private void setShardsPerNode(int shardsPerNode) { + try { + ClusterUpdateSettingsResponse response; + if (frequently()) { + response = client().admin().cluster() + 
.prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(shardsPerNodeKey, shardsPerNode).build()) + .get(); + assertEquals(shardsPerNode, response.getPersistentSettings().getAsInt(shardsPerNodeKey, -1).intValue()); + } else { + response = client().admin().cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(shardsPerNodeKey, shardsPerNode).build()) + .get(); + assertEquals(shardsPerNode, response.getTransientSettings().getAsInt(shardsPerNodeKey, -1).intValue()); + } + } catch (IllegalArgumentException ex) { + fail(ex.getMessage()); + } + } + + public static class ShardCounts { + private final int shardsPerNode; + + private final int firstIndexShards; + private final int firstIndexReplicas; + + private final int failingIndexShards; + private final int failingIndexReplicas; + + private ShardCounts(int shardsPerNode, + int firstIndexShards, + int firstIndexReplicas, + int failingIndexShards, + int failingIndexReplicas) { + this.shardsPerNode = shardsPerNode; + this.firstIndexShards = firstIndexShards; + this.firstIndexReplicas = firstIndexReplicas; + this.failingIndexShards = failingIndexShards; + this.failingIndexReplicas = failingIndexReplicas; + } + + public static ShardCounts forDataNodeCount(int dataNodes) { + int mainIndexReplicas = between(0, dataNodes - 1); + int mainIndexShards = between(1, 10); + int totalShardsInIndex = (mainIndexReplicas + 1) * mainIndexShards; + int shardsPerNode = (int) Math.ceil((double) totalShardsInIndex / dataNodes); + int totalCap = shardsPerNode * dataNodes; + + int failingIndexShards; + int failingIndexReplicas; + if (dataNodes > 1 && frequently()) { + failingIndexShards = Math.max(1, totalCap - totalShardsInIndex); + failingIndexReplicas = between(1, dataNodes - 1); + } else { + failingIndexShards = totalCap - totalShardsInIndex + between(1, 10); + failingIndexReplicas = 0; + } + + return new ShardCounts(shardsPerNode, mainIndexShards, mainIndexReplicas, failingIndexShards, 
failingIndexReplicas); + } + + public int getShardsPerNode() { + return shardsPerNode; + } + + public int getFirstIndexShards() { + return firstIndexShards; + } + + public int getFirstIndexReplicas() { + return firstIndexReplicas; + } + + public int getFailingIndexShards() { + return failingIndexShards; + } + + public int getFailingIndexReplicas() { + return failingIndexReplicas; + } + } +} diff --git a/server/src/test/java/org/elasticsearch/common/UUIDTests.java b/server/src/test/java/org/elasticsearch/common/UUIDTests.java index 849db0dc71259..dcc440acbcd1a 100644 --- a/server/src/test/java/org/elasticsearch/common/UUIDTests.java +++ b/server/src/test/java/org/elasticsearch/common/UUIDTests.java @@ -20,6 +20,7 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field.Store; @@ -28,7 +29,6 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.SerialMergeScheduler; import org.apache.lucene.store.Directory; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; @@ -117,7 +117,7 @@ public void testUUIDThreaded(UUIDGenerator uuidSource) { } public void testCompression() throws Exception { - Logger logger = Loggers.getLogger(UUIDTests.class); + Logger logger = LogManager.getLogger(UUIDTests.class); // Low number so that the test runs quickly, but the results are more interesting with larger numbers // of indexed documents assertThat(testCompression(100000, 10000, 3, logger), Matchers.lessThan(14d)); // ~12 in practice diff --git a/server/src/test/java/org/elasticsearch/common/logging/DeprecationLoggerTests.java b/server/src/test/java/org/elasticsearch/common/logging/DeprecationLoggerTests.java index 490f7961a894d..537bb3db70aca 
100644 --- a/server/src/test/java/org/elasticsearch/common/logging/DeprecationLoggerTests.java +++ b/server/src/test/java/org/elasticsearch/common/logging/DeprecationLoggerTests.java @@ -19,6 +19,8 @@ package org.elasticsearch.common.logging; import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator; + +import org.apache.logging.log4j.LogManager; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.test.ESTestCase; @@ -49,7 +51,7 @@ public class DeprecationLoggerTests extends ESTestCase { private static final RegexMatcher warningValueMatcher = matches(WARNING_HEADER_PATTERN.pattern()); - private final DeprecationLogger logger = new DeprecationLogger(Loggers.getLogger(getClass())); + private final DeprecationLogger logger = new DeprecationLogger(LogManager.getLogger(getClass())); @Override protected boolean enableWarningsCheck() { diff --git a/server/src/test/java/org/elasticsearch/common/logging/LoggersTests.java b/server/src/test/java/org/elasticsearch/common/logging/LoggersTests.java index 9b69a876c1d2a..43df5a900366b 100644 --- a/server/src/test/java/org/elasticsearch/common/logging/LoggersTests.java +++ b/server/src/test/java/org/elasticsearch/common/logging/LoggersTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.common.logging; import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.core.LogEvent; import org.apache.logging.log4j.core.appender.AbstractAppender; @@ -57,7 +58,7 @@ ParameterizedMessage lastParameterizedMessage() { public void testParameterizedMessageLambda() throws Exception { final MockAppender appender = new MockAppender("trace_appender"); appender.start(); - final Logger testLogger = Loggers.getLogger(LoggersTests.class); + final Logger testLogger = LogManager.getLogger(LoggersTests.class); Loggers.addAppender(testLogger, appender); 
Loggers.setLevel(testLogger, Level.TRACE); diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/SettingsBasedHostProviderIT.java b/server/src/test/java/org/elasticsearch/discovery/zen/SettingsBasedHostProviderIT.java index 429950bf8530a..52f3a05ce0866 100644 --- a/server/src/test/java/org/elasticsearch/discovery/zen/SettingsBasedHostProviderIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/zen/SettingsBasedHostProviderIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.discovery.zen; +import org.apache.lucene.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.common.settings.Settings; @@ -29,6 +30,7 @@ import static org.elasticsearch.discovery.zen.SettingsBasedHostsProvider.LIMIT_LOCAL_PORTS_COUNT; import static org.elasticsearch.transport.TcpTransport.PORT; +@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/34781") @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0) public class SettingsBasedHostProviderIT extends ESIntegTestCase { diff --git a/server/src/test/java/org/elasticsearch/gateway/AsyncShardFetchTests.java b/server/src/test/java/org/elasticsearch/gateway/AsyncShardFetchTests.java index 2b58831a956ca..2a45947706298 100644 --- a/server/src/test/java/org/elasticsearch/gateway/AsyncShardFetchTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/AsyncShardFetchTests.java @@ -18,12 +18,12 @@ */ package org.elasticsearch.gateway; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.Version; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.nodes.BaseNodeResponse; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.logging.Loggers; import 
org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; @@ -292,7 +292,7 @@ static class Entry { private AtomicInteger reroute = new AtomicInteger(); TestFetch(ThreadPool threadPool) { - super(Loggers.getLogger(TestFetch.class), "test", new ShardId("test", "_na_", 1), null); + super(LogManager.getLogger(TestFetch.class), "test", new ShardId("test", "_na_", 1), null); this.threadPool = threadPool; } diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index 4a0d6a8e8884b..ff8393b659d14 100644 --- a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.gateway; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; @@ -35,7 +36,6 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentFactory; @@ -63,7 +63,7 @@ @ClusterScope(scope = Scope.TEST, numDataNodes = 0) public class GatewayIndexStateIT extends ESIntegTestCase { - private final Logger logger = Loggers.getLogger(GatewayIndexStateIT.class); + private final Logger logger = LogManager.getLogger(GatewayIndexStateIT.class); public void testMappingMetaDataParsed() throws Exception { logger.info("--> starting 1 nodes"); diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java index 
b4e98775d97ac..cfac866895f0e 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java @@ -29,9 +29,14 @@ import org.elasticsearch.cluster.metadata.IndexGraveyard; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.shards.ClusterShardLimitIT; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.FileSystemUtils; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -80,6 +85,7 @@ import java.util.stream.Stream; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.elasticsearch.cluster.shards.ClusterShardLimitIT.ShardCounts.forDataNodeCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.containsString; @@ -567,4 +573,76 @@ public void testConflictingEngineFactories() throws IOException { assertThat(e, hasToString(new RegexMatcher(pattern))); } + public void testOverShardLimit() { + int nodesInCluster = randomIntBetween(1,100); + ClusterShardLimitIT.ShardCounts counts = forDataNodeCount(nodesInCluster); + + Settings clusterSettings = Settings.builder() + .put(MetaData.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), counts.getShardsPerNode()) + .build(); + + ClusterState state = createClusterForShardLimitTest(nodesInCluster, counts.getFirstIndexShards(), 
counts.getFirstIndexReplicas(), + clusterSettings); + + int shardsToAdd = counts.getFailingIndexShards() * (1 + counts.getFailingIndexReplicas()); + DeprecationLogger deprecationLogger = new DeprecationLogger(logger); + Optional errorMessage = IndicesService.checkShardLimit(shardsToAdd, state, deprecationLogger); + + int totalShards = counts.getFailingIndexShards() * (1 + counts.getFailingIndexReplicas()); + int currentShards = counts.getFirstIndexShards() * (1 + counts.getFirstIndexReplicas()); + int maxShards = counts.getShardsPerNode() * nodesInCluster; + assertWarnings("In a future major version, this request will fail because this action would add [" + + totalShards + "] total shards, but this cluster currently has [" + currentShards + "]/[" + maxShards + "] maximum shards open."+ + " Before upgrading, reduce the number of shards in your cluster or adjust the cluster setting [cluster.max_shards_per_node]."); + assertFalse(errorMessage.isPresent()); + } + + public void testUnderShardLimit() { + int nodesInCluster = randomIntBetween(2,100); + // Calculate the counts for a cluster 1 node smaller than we have to ensure we have headroom + ClusterShardLimitIT.ShardCounts counts = forDataNodeCount(nodesInCluster - 1); + + Settings clusterSettings = Settings.builder() + .put(MetaData.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), counts.getShardsPerNode()) + .build(); + + ClusterState state = createClusterForShardLimitTest(nodesInCluster, counts.getFirstIndexShards(), counts.getFirstIndexReplicas(), + clusterSettings); + + int existingShards = counts.getFirstIndexShards() * (1 + counts.getFirstIndexReplicas()); + int shardsToAdd = randomIntBetween(1, (counts.getShardsPerNode() * nodesInCluster) - existingShards); + DeprecationLogger deprecationLogger = new DeprecationLogger(logger); + Optional errorMessage = IndicesService.checkShardLimit(shardsToAdd, state, deprecationLogger); + + assertFalse(errorMessage.isPresent()); + } + + public static ClusterState 
createClusterForShardLimitTest(int nodesInCluster, int shardsInIndex, int replicas, + Settings clusterSettings) { + ImmutableOpenMap.Builder dataNodes = ImmutableOpenMap.builder(); + for (int i = 0; i < nodesInCluster; i++) { + dataNodes.put(randomAlphaOfLengthBetween(5,15), mock(DiscoveryNode.class)); + } + DiscoveryNodes nodes = mock(DiscoveryNodes.class); + when(nodes.getDataNodes()).thenReturn(dataNodes.build()); + + IndexMetaData.Builder indexMetaData = IndexMetaData.builder(randomAlphaOfLengthBetween(5, 15)) + .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .creationDate(randomLong()) + .numberOfShards(shardsInIndex) + .numberOfReplicas(replicas); + MetaData.Builder metaData = MetaData.builder().put(indexMetaData); + if (randomBoolean()) { + metaData.transientSettings(clusterSettings); + } else { + metaData.persistentSettings(clusterSettings); + } + + return ClusterState.builder(ClusterName.DEFAULT) + .metaData(metaData) + .nodes(nodes) + .build(); + } + + } diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index 77c77baaa11a3..3d8b1decea41e 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -143,6 +143,7 @@ public ClusterStateChanges(NamedXContentRegistry xContentRegistry, ThreadPool th // mocks clusterService = mock(ClusterService.class); + when(clusterService.getClusterSettings()).thenReturn(clusterSettings); IndicesService indicesService = mock(IndicesService.class); // MetaDataCreateIndexService creates indices using its IndicesService instance to check mappings -> fake it here try { diff --git a/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java 
b/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java index 3bfcfdd3ab187..83411ad2bc208 100644 --- a/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java +++ b/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java @@ -156,7 +156,7 @@ public void testSyncFailsOnIndexClosedOrMissing() throws InterruptedException { listener.latch.await(); assertNotNull(listener.error); assertNull(listener.result); - assertEquals("no such index", listener.error.getMessage()); + assertEquals("no such index [index not found]", listener.error.getMessage()); } public void testFailAfterIntermediateCommit() throws InterruptedException { diff --git a/server/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java b/server/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java index 96bb9ab8a1ce7..f37dde7ec09a6 100644 --- a/server/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java +++ b/server/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java @@ -76,14 +76,14 @@ public void testSimpleCloseMissingIndex() { Client client = client(); Exception e = expectThrows(IndexNotFoundException.class, () -> client.admin().indices().prepareClose("test1").execute().actionGet()); - assertThat(e.getMessage(), is("no such index")); + assertThat(e.getMessage(), is("no such index [test1]")); } public void testSimpleOpenMissingIndex() { Client client = client(); Exception e = expectThrows(IndexNotFoundException.class, () -> client.admin().indices().prepareOpen("test1").execute().actionGet()); - assertThat(e.getMessage(), is("no such index")); + assertThat(e.getMessage(), is("no such index [test1]")); } public void testCloseOneMissingIndex() { @@ -93,7 +93,7 @@ public void testCloseOneMissingIndex() { assertThat(healthResponse.isTimedOut(), equalTo(false)); Exception e = expectThrows(IndexNotFoundException.class, () -> 
client.admin().indices().prepareClose("test1", "test2").execute().actionGet()); - assertThat(e.getMessage(), is("no such index")); + assertThat(e.getMessage(), is("no such index [test2]")); } public void testCloseOneMissingIndexIgnoreMissing() { @@ -114,7 +114,7 @@ public void testOpenOneMissingIndex() { assertThat(healthResponse.isTimedOut(), equalTo(false)); Exception e = expectThrows(IndexNotFoundException.class, () -> client.admin().indices().prepareOpen("test1", "test2").execute().actionGet()); - assertThat(e.getMessage(), is("no such index")); + assertThat(e.getMessage(), is("no such index [test2]")); } public void testOpenOneMissingIndexIgnoreMissing() { diff --git a/server/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java b/server/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java index 2b3f507270838..59d04c767d809 100644 --- a/server/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java +++ b/server/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.indices.state; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; @@ -29,7 +30,6 @@ import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.ShardRoutingState; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.indices.IndexClosedException; @@ -40,7 +40,7 @@ @ESIntegTestCase.ClusterScope(minNumDataNodes = 2) public class SimpleIndexStateIT extends ESIntegTestCase { - private final Logger logger = Loggers.getLogger(SimpleIndexStateIT.class); + private final Logger logger = 
LogManager.getLogger(SimpleIndexStateIT.class); public void testSimpleOpenClose() { logger.info("--> creating test index"); diff --git a/server/src/test/java/org/elasticsearch/mget/SimpleMgetIT.java b/server/src/test/java/org/elasticsearch/mget/SimpleMgetIT.java index 670f9cdfa08be..c7f27dc81a7c4 100644 --- a/server/src/test/java/org/elasticsearch/mget/SimpleMgetIT.java +++ b/server/src/test/java/org/elasticsearch/mget/SimpleMgetIT.java @@ -62,7 +62,7 @@ public void testThatMgetShouldWorkWithOneIndexMissing() throws IOException { assertThat(mgetResponse.getResponses()[1].getIndex(), is("nonExistingIndex")); assertThat(mgetResponse.getResponses()[1].isFailed(), is(true)); - assertThat(mgetResponse.getResponses()[1].getFailure().getMessage(), is("no such index")); + assertThat(mgetResponse.getResponses()[1].getFailure().getMessage(), is("no such index [nonExistingIndex]")); assertThat(((ElasticsearchException) mgetResponse.getResponses()[1].getFailure().getFailure()).getIndex().getName(), is("nonExistingIndex")); @@ -72,7 +72,7 @@ public void testThatMgetShouldWorkWithOneIndexMissing() throws IOException { assertThat(mgetResponse.getResponses().length, is(1)); assertThat(mgetResponse.getResponses()[0].getIndex(), is("nonExistingIndex")); assertThat(mgetResponse.getResponses()[0].isFailed(), is(true)); - assertThat(mgetResponse.getResponses()[0].getFailure().getMessage(), is("no such index")); + assertThat(mgetResponse.getResponses()[0].getFailure().getMessage(), is("no such index [nonExistingIndex]")); assertThat(((ElasticsearchException) mgetResponse.getResponses()[0].getFailure().getFailure()).getIndex().getName(), is("nonExistingIndex")); } diff --git a/server/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java b/server/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java index b0d25f43bd694..720fd0acdf082 100644 --- a/server/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java +++ 
b/server/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.recovery; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; @@ -29,7 +30,6 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexSettings; @@ -55,7 +55,7 @@ @TestLogging("_root:DEBUG,org.elasticsearch.index.shard:TRACE,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.index.seqno:TRACE,org.elasticsearch.indices.recovery:TRACE") public class RecoveryWhileUnderLoadIT extends ESIntegTestCase { - private final Logger logger = Loggers.getLogger(RecoveryWhileUnderLoadIT.class); + private final Logger logger = LogManager.getLogger(RecoveryWhileUnderLoadIT.class); public void testRecoverWhileUnderLoadAllocateReplicasTest() throws Exception { logger.info("--> creating test index ..."); diff --git a/server/src/test/java/org/elasticsearch/search/SearchCancellationIT.java b/server/src/test/java/org/elasticsearch/search/SearchCancellationIT.java index 2e28d16c71dcd..81bd844c8fbf6 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchCancellationIT.java +++ b/server/src/test/java/org/elasticsearch/search/SearchCancellationIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.search; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; @@ -29,7 +30,6 @@ import 
org.elasticsearch.action.search.SearchScrollAction; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.plugins.Plugin; @@ -269,7 +269,7 @@ public void enableBlock() { public Map, Object>> pluginScripts() { return Collections.singletonMap(SCRIPT_NAME, params -> { LeafFieldsLookup fieldsLookup = (LeafFieldsLookup) params.get("_fields"); - Loggers.getLogger(SearchCancellationIT.class).info("Blocking on the document {}", fieldsLookup.get("_id")); + LogManager.getLogger(SearchCancellationIT.class).info("Blocking on the document {}", fieldsLookup.get("_id")); hits.incrementAndGet(); try { awaitBusy(() -> shouldBlock.get() == false); diff --git a/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java b/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java index c10a721415b43..ca20e6ec4788d 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java @@ -45,11 +45,11 @@ import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.derivative.DerivativePipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.derivative.DerivativePipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.derivative.InternalDerivative; -import org.elasticsearch.search.aggregations.pipeline.movavg.models.MovAvgModel; -import org.elasticsearch.search.aggregations.pipeline.movavg.models.SimpleModel; +import 
org.elasticsearch.search.aggregations.pipeline.DerivativePipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.DerivativePipelineAggregator; +import org.elasticsearch.search.aggregations.pipeline.InternalDerivative; +import org.elasticsearch.search.aggregations.pipeline.MovAvgModel; +import org.elasticsearch.search.aggregations.pipeline.SimpleModel; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java index 626a2264e1f07..ac34a96f0d992 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java @@ -71,10 +71,10 @@ import org.elasticsearch.search.aggregations.metrics.InternalTopHitsTests; import org.elasticsearch.search.aggregations.metrics.InternalValueCountTests; import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValueTests; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.InternalBucketMetricValueTests; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.percentile.InternalPercentilesBucketTests; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.extended.InternalExtendedStatsBucketTests; -import org.elasticsearch.search.aggregations.pipeline.derivative.InternalDerivativeTests; +import org.elasticsearch.search.aggregations.pipeline.InternalBucketMetricValueTests; +import org.elasticsearch.search.aggregations.pipeline.InternalPercentilesBucketTests; +import org.elasticsearch.search.aggregations.pipeline.InternalExtendedStatsBucketTests; +import 
org.elasticsearch.search.aggregations.pipeline.InternalDerivativeTests; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.InternalAggregationTestCase; import org.elasticsearch.test.InternalMultiBucketAggregationTestCase; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/AggregatorFactoriesBuilderTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/AggregatorFactoriesBuilderTests.java index d8d7f416d2d84..4cbc78acad1c2 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/AggregatorFactoriesBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/AggregatorFactoriesBuilderTests.java @@ -26,7 +26,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; -import org.elasticsearch.search.aggregations.pipeline.cumulativesum.CumulativeSumPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.CumulativeSumPipelineAggregationBuilder; import org.elasticsearch.test.AbstractSerializingTestCase; import org.junit.Before; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/AggregatorFactoriesTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/AggregatorFactoriesTests.java index 8b636f2d6a6a0..7a4e0fb705918 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/AggregatorFactoriesTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/AggregatorFactoriesTests.java @@ -35,8 +35,7 @@ import org.elasticsearch.script.Script; import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders; -import org.elasticsearch.search.aggregations.pipeline.bucketscript.BucketScriptPipelineAggregationBuilder; +import 
org.elasticsearch.search.aggregations.pipeline.BucketScriptPipelineAggregationBuilder; import org.elasticsearch.test.AbstractQueryTestCase; import org.elasticsearch.test.ESTestCase; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/MetaDataIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/MetaDataIT.java index 365b6ddc218f1..93d966d0a4cad 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/MetaDataIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/MetaDataIT.java @@ -23,7 +23,7 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.Sum; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.InternalBucketMetricValue; +import org.elasticsearch.search.aggregations.pipeline.InternalBucketMetricValue; import org.elasticsearch.test.ESIntegTestCase; import java.util.HashMap; @@ -33,7 +33,7 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; -import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.maxBucket; +import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.maxBucket; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java index 1619989f38bca..ce43f9df408c3 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java +++ 
b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java @@ -69,7 +69,7 @@ import org.elasticsearch.search.aggregations.bucket.nested.NestedAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.InternalTopHits; import org.elasticsearch.search.aggregations.metrics.TopHitsAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.bucketscript.BucketScriptPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.BucketScriptPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.ScoreSortBuilder; @@ -87,7 +87,7 @@ import static org.elasticsearch.index.mapper.SeqNoFieldMapper.PRIMARY_TERM_NAME; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; -import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.bucketScript; +import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.bucketScript; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.instanceOf; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java index 1321c8bca4711..5d0bbf0f853a8 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java @@ -18,8 +18,8 @@ */ package org.elasticsearch.search.aggregations.metrics; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; @@ -81,7 +81,7 @@ private static double[] randomPercents(long minValue, long maxValue) { } } Arrays.sort(percents); - Loggers.getLogger(HDRPercentileRanksIT.class).info("Using values={}", Arrays.toString(percents)); + LogManager.getLogger(HDRPercentileRanksIT.class).info("Using values={}", Arrays.toString(percents)); return percents; } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java index 67eb4939ae529..256717f809f3b 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java @@ -18,8 +18,8 @@ */ package org.elasticsearch.search.aggregations.metrics; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.plugins.Plugin; @@ -82,7 +82,7 @@ private static double[] randomPercentiles() { } } Arrays.sort(percentiles); - Loggers.getLogger(HDRPercentilesIT.class).info("Using percentiles={}", Arrays.toString(percentiles)); + LogManager.getLogger(HDRPercentilesIT.class).info("Using percentiles={}", Arrays.toString(percentiles)); return percentiles; } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalStatsBucketTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalStatsBucketTests.java index cb4b024f99da0..9604b583e8b4a 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalStatsBucketTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalStatsBucketTests.java @@ 
-23,8 +23,8 @@ import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.ParsedAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.InternalStatsBucket; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.ParsedStatsBucket; +import org.elasticsearch.search.aggregations.pipeline.InternalStatsBucket; +import org.elasticsearch.search.aggregations.pipeline.ParsedStatsBucket; import java.util.Collections; import java.util.List; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java index 8cbf9883fe534..4a68cb6858213 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java @@ -18,8 +18,8 @@ */ package org.elasticsearch.search.aggregations.metrics; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.plugins.Plugin; @@ -81,7 +81,7 @@ private static double[] randomPercents(long minValue, long maxValue) { } } Arrays.sort(percents); - Loggers.getLogger(TDigestPercentileRanksIT.class).info("Using values={}", Arrays.toString(percents)); + LogManager.getLogger(TDigestPercentileRanksIT.class).info("Using values={}", Arrays.toString(percents)); return percents; } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java index 
73ce6c7ece7a6..25e3435ea9724 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java @@ -18,8 +18,8 @@ */ package org.elasticsearch.search.aggregations.metrics; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.plugins.Plugin; @@ -81,7 +81,7 @@ private static double[] randomPercentiles() { } } Arrays.sort(percentiles); - Loggers.getLogger(TDigestPercentilesIT.class).info("Using percentiles={}", Arrays.toString(percentiles)); + LogManager.getLogger(TDigestPercentilesIT.class).info("Using percentiles={}", Arrays.toString(percentiles)); return percentiles; } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/AbstractBucketMetricsTestCase.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/AbstractBucketMetricsTestCase.java similarity index 91% rename from server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/AbstractBucketMetricsTestCase.java rename to server/src/test/java/org/elasticsearch/search/aggregations/pipeline/AbstractBucketMetricsTestCase.java index 2b53a236c3bc8..8fdc23a025b95 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/AbstractBucketMetricsTestCase.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/AbstractBucketMetricsTestCase.java @@ -17,10 +17,11 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.search.aggregations.BasePipelineAggregationTestCase; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; +import org.elasticsearch.search.aggregations.pipeline.BucketMetricsPipelineAggregationBuilder; public abstract class AbstractBucketMetricsTestCase> extends BasePipelineAggregationTestCase { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/avg/AvgBucketAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketAggregatorTests.java similarity index 96% rename from server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/avg/AvgBucketAggregatorTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketAggregatorTests.java index dd8938bc8786a..c0dd46011755b 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/avg/AvgBucketAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketAggregatorTests.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.avg; +package org.elasticsearch.search.aggregations.pipeline; import org.apache.lucene.document.Document; import org.apache.lucene.document.SortedNumericDocValuesField; @@ -41,6 +41,8 @@ import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.InternalAvg; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.pipeline.AvgBucketPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.AvgBucketPipelineAggregator; import java.io.IOException; import java.util.ArrayList; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketIT.java index 8514b1a0c0da9..5b044af53f239 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketIT.java @@ -37,7 +37,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; -import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.avgBucket; +import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.avgBucket; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/AvgBucketTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketTests.java 
similarity index 92% rename from server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/AvgBucketTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketTests.java index c504aa3f46183..0dc10cb7a7a13 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/AvgBucketTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketTests.java @@ -17,13 +17,11 @@ * under the License. */ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.avg.AvgBucketPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValueType; import java.util.Collections; @@ -50,18 +48,18 @@ public void testValidate() { () -> builder.validate(null, aggBuilders, Collections.emptySet())); assertEquals(PipelineAggregator.Parser.BUCKETS_PATH.getPreferredName() + " aggregation does not exist for aggregation [name]: invalid_agg>metric", ex.getMessage()); - + // Now try to point to a single bucket agg AvgBucketPipelineAggregationBuilder builder2 = new AvgBucketPipelineAggregationBuilder("name", "global>metric"); ex = expectThrows(IllegalArgumentException.class, () -> builder2.validate(null, aggBuilders, Collections.emptySet())); assertEquals("The first aggregation in " + PipelineAggregator.Parser.BUCKETS_PATH.getPreferredName() + " must be a multi-bucket aggregation for aggregation [name] found :" + GlobalAggregationBuilder.class.getName() + " for buckets path: global>metric", ex.getMessage()); - + // 
Now try to point to a valid multi-bucket agg (no exception should be thrown) AvgBucketPipelineAggregationBuilder builder3 = new AvgBucketPipelineAggregationBuilder("name", "terms>metric"); builder3.validate(null, aggBuilders, Collections.emptySet()); - + } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java index bd92c73f997f1..040eb66e7cfcc 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java @@ -47,7 +47,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.dateRange; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; -import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.bucketScript; +import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.bucketScript; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptTests.java index c213619183be1..20684b6383f1f 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptTests.java @@ -23,7 +23,6 @@ import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.BasePipelineAggregationTestCase; import 
org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.pipeline.bucketscript.BucketScriptPipelineAggregationBuilder; import java.util.HashMap; import java.util.Map; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketSelectorIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketSelectorIT.java index 05de849854f67..7314533d0b6f0 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketSelectorIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketSelectorIT.java @@ -46,8 +46,8 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; -import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.bucketSelector; -import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.derivative; +import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.bucketSelector; +import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.derivative; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketSelectorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketSelectorTests.java index 8dd63942d866f..fb8827860837b 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketSelectorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketSelectorTests.java @@ -23,7 +23,6 @@ import 
org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.BasePipelineAggregationTestCase; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.pipeline.bucketselector.BucketSelectorPipelineAggregationBuilder; import java.util.HashMap; import java.util.Map; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketsort/BucketSortIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketSortIT.java similarity index 98% rename from server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketsort/BucketSortIT.java rename to server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketSortIT.java index df2d7e64f4605..bc3610fca8e86 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketsort/BucketSortIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketSortIT.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.bucketsort; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; @@ -28,7 +28,6 @@ import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.Avg; -import org.elasticsearch.search.aggregations.pipeline.BucketHelpers; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESIntegTestCase; @@ -45,7 +44,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.dateHistogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; -import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.bucketSort; +import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.bucketSort; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketsort/BucketSortTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketSortTests.java similarity index 96% rename from server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketsort/BucketSortTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketSortTests.java index 48ce6073beae9..cb8d8db8cdc8c 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketsort/BucketSortTests.java +++ 
b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketSortTests.java @@ -16,10 +16,11 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.search.aggregations.pipeline.bucketsort; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.search.aggregations.BasePipelineAggregationTestCase; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers; +import org.elasticsearch.search.aggregations.pipeline.BucketSortPipelineAggregationBuilder; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.SortOrder; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java index 08337ef969f77..961db6931fb6a 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java @@ -45,8 +45,6 @@ import org.elasticsearch.search.aggregations.metrics.InternalAvg; import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.cumulativesum.CumulativeSumPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.derivative.DerivativePipelineAggregationBuilder; import java.io.IOException; import java.util.Arrays; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumTests.java index 3b1514a8c38fd..edf879ce77f68 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumTests.java +++ 
b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.search.aggregations.BasePipelineAggregationTestCase; -import org.elasticsearch.search.aggregations.pipeline.cumulativesum.CumulativeSumPipelineAggregationBuilder; public class CumulativeSumTests extends BasePipelineAggregationTestCase { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/DateDerivativeIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/DateDerivativeIT.java index aaa296fc31738..95710ead1a4e1 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/DateDerivativeIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/DateDerivativeIT.java @@ -28,7 +28,6 @@ import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.search.aggregations.metrics.Sum; -import org.elasticsearch.search.aggregations.pipeline.derivative.Derivative; import org.elasticsearch.search.aggregations.support.AggregationPath; import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.Matcher; @@ -45,7 +44,7 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.search.aggregations.AggregationBuilders.dateHistogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; -import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.derivative; +import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.derivative; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; diff --git 
a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/DerivativeIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/DerivativeIT.java index 5944777b628f5..7222bc19b596a 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/DerivativeIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/DerivativeIT.java @@ -32,8 +32,6 @@ import org.elasticsearch.search.aggregations.metrics.Stats; import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.pipeline.derivative.Derivative; -import org.elasticsearch.search.aggregations.pipeline.movavg.models.SimpleModel; import org.elasticsearch.search.aggregations.support.AggregationPath; import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.Matchers; @@ -49,8 +47,8 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.stats; import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.search.aggregations.AggregationBuilders.avg; -import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.derivative; -import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.movingAvg; +import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.derivative; +import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.movingAvg; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.closeTo; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/DerivativeTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/DerivativeTests.java index 
4b98e2ee6b263..0196e2c85b0c6 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/DerivativeTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/DerivativeTests.java @@ -21,7 +21,6 @@ import org.elasticsearch.search.aggregations.BasePipelineAggregationTestCase; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.pipeline.derivative.DerivativePipelineAggregationBuilder; public class DerivativeTests extends BasePipelineAggregationTestCase { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java index e1aa1dfce3f9e..436a583695b5d 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java @@ -31,7 +31,6 @@ import org.elasticsearch.search.aggregations.metrics.ExtendedStats.Bounds; import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.extended.ExtendedStatsBucket; import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; @@ -41,7 +40,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; -import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.extendedStatsBucket; +import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.extendedStatsBucket; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; @@ -367,7 +366,8 @@ public void testMetricAsSubAggWithInsertZeros() throws Exception { histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue) .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .subAggregation(extendedStatsBucket("extended_stats_bucket", "histo>sum").gapPolicy(GapPolicy.INSERT_ZEROS))) + .subAggregation(extendedStatsBucket("extended_stats_bucket", "histo>sum") + .gapPolicy(GapPolicy.INSERT_ZEROS))) .execute().actionGet(); assertSearchResponse(response); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/ExtendedStatsBucketTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketTests.java similarity index 94% rename from server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/ExtendedStatsBucketTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketTests.java index 43303205b463e..9930541cb007e 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/ExtendedStatsBucketTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketTests.java @@ -17,15 +17,13 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.extended.ExtendedStatsBucketPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValueType; import java.util.Collections; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/InternalBucketMetricValueTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/InternalBucketMetricValueTests.java similarity index 95% rename from server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/InternalBucketMetricValueTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/pipeline/InternalBucketMetricValueTests.java index 85c83d41058c1..b10767cbeb563 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/InternalBucketMetricValueTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/InternalBucketMetricValueTests.java @@ -17,11 +17,13 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.ParsedAggregation; +import org.elasticsearch.search.aggregations.pipeline.BucketMetricValue; +import org.elasticsearch.search.aggregations.pipeline.InternalBucketMetricValue; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.test.InternalAggregationTestCase; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/derivative/InternalDerivativeTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/InternalDerivativeTests.java similarity index 95% rename from server/src/test/java/org/elasticsearch/search/aggregations/pipeline/derivative/InternalDerivativeTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/pipeline/InternalDerivativeTests.java index 3ea7f1055743f..6522e7591e503 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/derivative/InternalDerivativeTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/InternalDerivativeTests.java @@ -17,11 +17,13 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.derivative; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.ParsedAggregation; +import org.elasticsearch.search.aggregations.pipeline.InternalDerivative; +import org.elasticsearch.search.aggregations.pipeline.ParsedDerivative; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.test.InternalAggregationTestCase; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/InternalExtendedStatsBucketTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/InternalExtendedStatsBucketTests.java similarity index 79% rename from server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/InternalExtendedStatsBucketTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/pipeline/InternalExtendedStatsBucketTests.java index 03481ab7f6516..c647e38837314 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/InternalExtendedStatsBucketTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/InternalExtendedStatsBucketTests.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.extended; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.search.DocValueFormat; @@ -25,6 +25,8 @@ import org.elasticsearch.search.aggregations.metrics.InternalExtendedStatsTests; import org.elasticsearch.search.aggregations.metrics.InternalExtendedStats; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.pipeline.InternalExtendedStatsBucket; +import org.elasticsearch.search.aggregations.pipeline.ParsedExtendedStatsBucket; import java.util.Collections; import java.util.List; @@ -33,8 +35,11 @@ public class InternalExtendedStatsBucketTests extends InternalExtendedStatsTests { @Override - protected InternalExtendedStatsBucket createInstance(String name, long count, double sum, double min, double max, double sumOfSqrs, - double sigma, DocValueFormat formatter, List pipelineAggregators, Map metaData) { + protected InternalExtendedStatsBucket createInstance(String name, long count, double sum, double min, + double max, double sumOfSqrs, + double sigma, DocValueFormat formatter, + List pipelineAggregators, + Map metaData) { return new InternalExtendedStatsBucket(name, count, sum, min, max, sumOfSqrs, sigma, formatter, pipelineAggregators, Collections.emptyMap()); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/InternalPercentilesBucketTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/InternalPercentilesBucketTests.java similarity index 97% rename from server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/InternalPercentilesBucketTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/pipeline/InternalPercentilesBucketTests.java index c1d3ffeb0e553..176966174c430 100644 --- 
a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/InternalPercentilesBucketTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/InternalPercentilesBucketTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.percentile; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.search.DocValueFormat; @@ -25,6 +25,8 @@ import org.elasticsearch.search.aggregations.ParsedAggregation; import org.elasticsearch.search.aggregations.metrics.Percentile; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.pipeline.InternalPercentilesBucket; +import org.elasticsearch.search.aggregations.pipeline.ParsedPercentilesBucket; import org.elasticsearch.test.InternalAggregationTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketIT.java index 4841c5e596a16..232941ae392a9 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketIT.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.PipelineAggregatorBuilders; import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; @@ -39,8 +40,6 @@ import org.elasticsearch.search.aggregations.metrics.Sum; 
import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.InternalBucketMetricValue; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.max.MaxBucketPipelineAggregationBuilder; import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; @@ -52,7 +51,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; -import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.maxBucket; +import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.maxBucket; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/MaxBucketTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketTests.java similarity index 92% rename from server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/MaxBucketTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketTests.java index cbf31130d38dc..c55152c68c3a6 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/MaxBucketTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketTests.java @@ -17,13 +17,11 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.max.MaxBucketPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValueType; import java.util.Collections; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MinBucketIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MinBucketIT.java index 82629363f8d8a..081304d0709f5 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MinBucketIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MinBucketIT.java @@ -27,7 +27,6 @@ import org.elasticsearch.search.aggregations.bucket.terms.IncludeExclude; import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.InternalBucketMetricValue; import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; @@ -38,7 +37,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; -import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.minBucket; +import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.minBucket; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/MinBucketTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MinBucketTests.java similarity index 92% rename from server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/MinBucketTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MinBucketTests.java index eca1db24ff7ff..317f1360c7845 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/MinBucketTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MinBucketTests.java @@ -17,13 +17,11 @@ * under the License. */ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.min.MinBucketPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValueType; import java.util.Collections; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovAvgIT.java similarity index 96% rename from server/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java rename to server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovAvgIT.java index 
41bbf053ff18b..bfc04151a5cd3 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovAvgIT.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.pipeline.moving.avg; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder; @@ -29,16 +29,6 @@ import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.search.aggregations.metrics.Avg; -import org.elasticsearch.search.aggregations.pipeline.BucketHelpers; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregationHelperTests; -import org.elasticsearch.search.aggregations.pipeline.SimpleValue; -import org.elasticsearch.search.aggregations.pipeline.derivative.Derivative; -import org.elasticsearch.search.aggregations.pipeline.movavg.models.EwmaModel; -import org.elasticsearch.search.aggregations.pipeline.movavg.models.HoltLinearModel; -import org.elasticsearch.search.aggregations.pipeline.movavg.models.HoltWintersModel; -import org.elasticsearch.search.aggregations.pipeline.movavg.models.LinearModel; -import org.elasticsearch.search.aggregations.pipeline.movavg.models.MovAvgModelBuilder; -import org.elasticsearch.search.aggregations.pipeline.movavg.models.SimpleModel; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.test.ESIntegTestCase; @@ -58,8 +48,8 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.max; import static org.elasticsearch.search.aggregations.AggregationBuilders.min; import static org.elasticsearch.search.aggregations.AggregationBuilders.range; 
-import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.derivative; -import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.movingAvg; +import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.derivative; +import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.movingAvg; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -204,7 +194,9 @@ private void setupExpected(MovAvgType type, MetricTarget target, int windowSize) } else { // If this isn't a gap, or is a _count, just insert the value - metricValue = target.equals(MetricTarget.VALUE) ? PipelineAggregationHelperTests.calculateMetric(docValues, metric) : mockBucket.count; + metricValue = target.equals(MetricTarget.VALUE) + ? PipelineAggregationHelperTests.calculateMetric(docValues, metric) + : mockBucket.count; } if (window.size() > 0) { @@ -663,8 +655,11 @@ public void testPredictNegativeKeysAtStart() { .interval(1) .subAggregation(avg("avg").field(VALUE_FIELD)) .subAggregation( - movingAvg("movavg_values", "avg").window(windowSize).modelBuilder(new SimpleModel.SimpleModelBuilder()) - .gapPolicy(gapPolicy).predict(5))).execute().actionGet(); + movingAvg("movavg_values", "avg") + .window(windowSize) + .modelBuilder(new SimpleModel.SimpleModelBuilder()) + .gapPolicy(gapPolicy).predict(5))) + .execute().actionGet(); assertSearchResponse(response); @@ -879,11 +874,17 @@ public void testTwoMovAvgsWithPredictions() { .subAggregation(avg("avg").field(VALUE_FIELD)) .subAggregation(derivative("deriv", "avg").gapPolicy(gapPolicy)) .subAggregation( - movingAvg("avg_movavg", "avg").window(windowSize).modelBuilder(new SimpleModel.SimpleModelBuilder()) - .gapPolicy(gapPolicy).predict(12)) + movingAvg("avg_movavg", "avg") + .window(windowSize) + .modelBuilder(new 
SimpleModel.SimpleModelBuilder()) + .gapPolicy(gapPolicy) + .predict(12)) .subAggregation( - movingAvg("deriv_movavg", "deriv").window(windowSize).modelBuilder(new SimpleModel.SimpleModelBuilder()) - .gapPolicy(gapPolicy).predict(12)) + movingAvg("deriv_movavg", "deriv") + .window(windowSize) + .modelBuilder(new SimpleModel.SimpleModelBuilder()) + .gapPolicy(gapPolicy) + .predict(12)) ).execute().actionGet(); assertSearchResponse(response); @@ -1281,7 +1282,8 @@ private void assertBucketContents(Histogram.Bucket actual, Double expectedCount, if (expectedCount == null) { assertThat("[_count] movavg is not null", countMovAvg, nullValue()); } else if (Double.isNaN(expectedCount)) { - assertThat("[_count] movavg should be NaN, but is ["+countMovAvg.value()+"] instead", countMovAvg.value(), equalTo(Double.NaN)); + assertThat("[_count] movavg should be NaN, but is ["+countMovAvg.value()+"] instead", + countMovAvg.value(), equalTo(Double.NaN)); } else { assertThat("[_count] movavg is null", countMovAvg, notNullValue()); assertEquals("[_count] movavg does not match expected [" + countMovAvg.value() + " vs " + expectedCount + "]", @@ -1293,7 +1295,8 @@ private void assertBucketContents(Histogram.Bucket actual, Double expectedCount, if (expectedValue == null) { assertThat("[value] movavg is not null", valuesMovAvg, Matchers.nullValue()); } else if (Double.isNaN(expectedValue)) { - assertThat("[value] movavg should be NaN, but is ["+valuesMovAvg.value()+"] instead", valuesMovAvg.value(), equalTo(Double.NaN)); + assertThat("[value] movavg should be NaN, but is ["+valuesMovAvg.value()+"] instead", + valuesMovAvg.value(), equalTo(Double.NaN)); } else { assertThat("[value] movavg is null", valuesMovAvg, notNullValue()); assertEquals("[value] movavg does not match expected [" + valuesMovAvg.value() + " vs " + expectedValue + "]", @@ -1325,8 +1328,8 @@ private MovAvgModelBuilder randomModelBuilder(double padding) { } } - private ValuesSourceAggregationBuilder> randomMetric(String 
name, - String field) { + private ValuesSourceAggregationBuilder> randomMetric(String name, String field) { int rand = randomIntBetween(0,3); switch (rand) { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovAvgTests.java similarity index 88% rename from server/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovAvgTests.java index 659fad3f45ce6..24ec6d9d685d5 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovAvgTests.java @@ -17,19 +17,19 @@ * under the License. */ -package org.elasticsearch.search.aggregations.pipeline.moving.avg; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.search.aggregations.BasePipelineAggregationTestCase; import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.pipeline.movavg.MovAvgPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.movavg.models.EwmaModel; -import org.elasticsearch.search.aggregations.pipeline.movavg.models.HoltLinearModel; -import org.elasticsearch.search.aggregations.pipeline.movavg.models.HoltWintersModel; -import org.elasticsearch.search.aggregations.pipeline.movavg.models.HoltWintersModel.SeasonalityType; -import org.elasticsearch.search.aggregations.pipeline.movavg.models.LinearModel; -import org.elasticsearch.search.aggregations.pipeline.movavg.models.SimpleModel; +import org.elasticsearch.search.aggregations.pipeline.MovAvgPipelineAggregationBuilder; +import 
org.elasticsearch.search.aggregations.pipeline.EwmaModel; +import org.elasticsearch.search.aggregations.pipeline.HoltLinearModel; +import org.elasticsearch.search.aggregations.pipeline.HoltWintersModel; +import org.elasticsearch.search.aggregations.pipeline.HoltWintersModel.SeasonalityType; +import org.elasticsearch.search.aggregations.pipeline.LinearModel; +import org.elasticsearch.search.aggregations.pipeline.SimpleModel; import java.io.IOException; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgUnitTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovAvgUnitTests.java similarity index 97% rename from server/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgUnitTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovAvgUnitTests.java index 55c31013fd9de..38ed1c1dc3f24 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgUnitTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovAvgUnitTests.java @@ -17,15 +17,15 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.moving.avg; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.collect.EvictingQueue; -import org.elasticsearch.search.aggregations.pipeline.movavg.models.EwmaModel; -import org.elasticsearch.search.aggregations.pipeline.movavg.models.HoltLinearModel; -import org.elasticsearch.search.aggregations.pipeline.movavg.models.HoltWintersModel; -import org.elasticsearch.search.aggregations.pipeline.movavg.models.LinearModel; -import org.elasticsearch.search.aggregations.pipeline.movavg.models.MovAvgModel; -import org.elasticsearch.search.aggregations.pipeline.movavg.models.SimpleModel; +import org.elasticsearch.search.aggregations.pipeline.EwmaModel; +import org.elasticsearch.search.aggregations.pipeline.HoltLinearModel; +import org.elasticsearch.search.aggregations.pipeline.HoltWintersModel; +import org.elasticsearch.search.aggregations.pipeline.LinearModel; +import org.elasticsearch.search.aggregations.pipeline.MovAvgModel; +import org.elasticsearch.search.aggregations.pipeline.SimpleModel; import org.elasticsearch.test.ESTestCase; import java.text.ParseException; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/movfn/MovFnPipelineAggregationBuilderSerializationTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovFnPipelineAggregationBuilderSerializationTests.java similarity index 92% rename from server/src/test/java/org/elasticsearch/search/aggregations/pipeline/movfn/MovFnPipelineAggregationBuilderSerializationTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovFnPipelineAggregationBuilderSerializationTests.java index 218cbdf62ca05..49923640805bd 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/movfn/MovFnPipelineAggregationBuilderSerializationTests.java +++ 
b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovFnPipelineAggregationBuilderSerializationTests.java @@ -17,11 +17,12 @@ * under the License. */ -package org.elasticsearch.search.aggregations.pipeline.movfn; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.script.Script; +import org.elasticsearch.search.aggregations.pipeline.MovFnPipelineAggregationBuilder; import org.elasticsearch.test.AbstractSerializingTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/movfn/MovFnUnitTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovFnUnitTests.java similarity index 96% rename from server/src/test/java/org/elasticsearch/search/aggregations/pipeline/movfn/MovFnUnitTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovFnUnitTests.java index db333a8ed7a08..842320e278118 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/movfn/MovFnUnitTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovFnUnitTests.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.movfn; +package org.elasticsearch.search.aggregations.pipeline; import org.apache.lucene.document.Document; import org.apache.lucene.document.LongPoint; @@ -42,6 +42,9 @@ import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogram; import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue; +import org.elasticsearch.search.aggregations.pipeline.MovFnPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.MovingFunctionScript; +import org.elasticsearch.search.aggregations.pipeline.MovingFunctions; import java.io.IOException; import java.util.Arrays; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/movfn/MovFnWhitelistedFunctionTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovFnWhitelistedFunctionTests.java similarity index 99% rename from server/src/test/java/org/elasticsearch/search/aggregations/pipeline/movfn/MovFnWhitelistedFunctionTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovFnWhitelistedFunctionTests.java index 6d0e388e64325..f8fe71596942c 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/movfn/MovFnWhitelistedFunctionTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovFnWhitelistedFunctionTests.java @@ -17,9 +17,10 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.movfn; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.collect.EvictingQueue; +import org.elasticsearch.search.aggregations.pipeline.MovingFunctions; import org.elasticsearch.test.ESTestCase; import java.util.Arrays; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java index 8f77c305229d9..330bbc647d934 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java @@ -29,7 +29,6 @@ import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.Percentile; import org.elasticsearch.search.aggregations.metrics.Sum; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.percentile.PercentilesBucket; import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; @@ -42,7 +41,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; -import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.percentilesBucket; +import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.percentilesBucket; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/PercentilesBucketTests.java 
b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketTests.java similarity index 94% rename from server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/PercentilesBucketTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketTests.java index a6040aaf9f67c..7ad05059a731a 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/PercentilesBucketTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketTests.java @@ -17,15 +17,13 @@ * under the License. */ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.percentile.PercentilesBucketPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValueType; import java.util.Collections; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffIT.java similarity index 94% rename from server/src/test/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffIT.java rename to server/src/test/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffIT.java index 68257045137d1..8522beeecde8d 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffIT.java +++ 
b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffIT.java @@ -17,16 +17,13 @@ * under the License. */ -package org.elasticsearch.search.aggregations.pipeline.serialdiff; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.collect.EvictingQueue; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; -import org.elasticsearch.search.aggregations.pipeline.BucketHelpers; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregationHelperTests; -import org.elasticsearch.search.aggregations.pipeline.SimpleValue; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.test.ESIntegTestCase; @@ -43,7 +40,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.max; import static org.elasticsearch.search.aggregations.AggregationBuilders.min; -import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.diff; +import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.diff; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; @@ -80,7 +77,8 @@ public String toString(){ } } - private ValuesSourceAggregationBuilder> randomMetric(String name, String field) { + private ValuesSourceAggregationBuilder> randomMetric(String name, String field) { int rand = randomIntBetween(0,3); switch (rand) { @@ -189,7 +187,9 @@ private void setupExpected(MetricTarget target) { } else { // If this isn't a gap, or is a 
_count, just insert the value - metricValue = target.equals(MetricTarget.VALUE) ? PipelineAggregationHelperTests.calculateMetric(docValues, metric) : mockBucket.count; + metricValue = target.equals(MetricTarget.VALUE) + ? PipelineAggregationHelperTests.calculateMetric(docValues, metric) + : mockBucket.count; } counter += 1; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/SerialDifferenceTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/SerialDifferenceTests.java index 7e71be69b64b7..78bf6954c5ef7 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/SerialDifferenceTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/SerialDifferenceTests.java @@ -21,7 +21,6 @@ import org.elasticsearch.search.aggregations.BasePipelineAggregationTestCase; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.pipeline.serialdiff.SerialDiffPipelineAggregationBuilder; public class SerialDifferenceTests extends BasePipelineAggregationTestCase { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketIT.java index f5d409951e3f1..eddbe47cae890 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketIT.java @@ -28,7 +28,6 @@ import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.StatsBucket; import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; @@ -38,7 +37,7 @@ import static 
org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; -import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.statsBucket; +import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.statsBucket; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/StatsBucketTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketTests.java similarity index 92% rename from server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/StatsBucketTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketTests.java index bcd90778136bc..bf2ef7615df66 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/StatsBucketTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketTests.java @@ -17,13 +17,11 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.StatsBucketPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValueType; import java.util.Collections; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/SumBucketIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/SumBucketIT.java index a803b9fe3d466..40499ffb561e1 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/SumBucketIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/SumBucketIT.java @@ -37,7 +37,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; -import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.sumBucket; +import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.sumBucket; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/SumBucketTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/SumBucketTests.java similarity index 92% rename from 
server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/SumBucketTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/pipeline/SumBucketTests.java index be6c7f9234230..fdba878524146 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/SumBucketTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/SumBucketTests.java @@ -17,13 +17,11 @@ * under the License. */ -package org.elasticsearch.search.aggregations.pipeline.bucketmetrics; +package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.sum.SumBucketPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValueType; import java.util.Collections; diff --git a/server/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java b/server/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java index c9679ae2ea96c..c4b085e84cfb2 100644 --- a/server/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java +++ b/server/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java @@ -29,7 +29,7 @@ import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.ScriptPlugin; -import org.elasticsearch.script.ExplainableSearchScript; +import org.elasticsearch.script.ExplainableScoreScript; import org.elasticsearch.script.ScoreScript; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptContext; @@ -94,7 +94,7 @@ public ScoreScript 
newInstance(LeafReaderContext ctx) throws IOException { } } - static class MyScript extends ScoreScript implements ExplainableSearchScript { + static class MyScript extends ScoreScript implements ExplainableScoreScript { MyScript(Map params, SearchLookup lookup, LeafReaderContext leafContext) { super(params, lookup, leafContext); diff --git a/server/src/test/java/org/elasticsearch/transport/ConnectionManagerTests.java b/server/src/test/java/org/elasticsearch/transport/ConnectionManagerTests.java index bff5a2b122d2f..3dc9e0aece71a 100644 --- a/server/src/test/java/org/elasticsearch/transport/ConnectionManagerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/ConnectionManagerTests.java @@ -64,7 +64,7 @@ public void stopThreadPool() { } public void testConnectionProfileResolve() { - final ConnectionProfile defaultProfile = ConnectionManager.buildDefaultConnectionProfile(Settings.EMPTY); + final ConnectionProfile defaultProfile = ConnectionProfile.buildDefaultConnectionProfile(Settings.EMPTY); assertEquals(defaultProfile, ConnectionProfile.resolveConnectionProfile(null, defaultProfile)); final ConnectionProfile.Builder builder = new ConnectionProfile.Builder(); @@ -96,7 +96,7 @@ public void testConnectionProfileResolve() { } public void testDefaultConnectionProfile() { - ConnectionProfile profile = ConnectionManager.buildDefaultConnectionProfile(Settings.EMPTY); + ConnectionProfile profile = ConnectionProfile.buildDefaultConnectionProfile(Settings.EMPTY); assertEquals(13, profile.getNumConnections()); assertEquals(1, profile.getNumConnectionsPerType(TransportRequestOptions.Type.PING)); assertEquals(6, profile.getNumConnectionsPerType(TransportRequestOptions.Type.REG)); @@ -104,7 +104,7 @@ public void testDefaultConnectionProfile() { assertEquals(2, profile.getNumConnectionsPerType(TransportRequestOptions.Type.RECOVERY)); assertEquals(3, profile.getNumConnectionsPerType(TransportRequestOptions.Type.BULK)); - profile = 
ConnectionManager.buildDefaultConnectionProfile(Settings.builder().put("node.master", false).build()); + profile = ConnectionProfile.buildDefaultConnectionProfile(Settings.builder().put("node.master", false).build()); assertEquals(12, profile.getNumConnections()); assertEquals(1, profile.getNumConnectionsPerType(TransportRequestOptions.Type.PING)); assertEquals(6, profile.getNumConnectionsPerType(TransportRequestOptions.Type.REG)); @@ -112,7 +112,7 @@ public void testDefaultConnectionProfile() { assertEquals(2, profile.getNumConnectionsPerType(TransportRequestOptions.Type.RECOVERY)); assertEquals(3, profile.getNumConnectionsPerType(TransportRequestOptions.Type.BULK)); - profile = ConnectionManager.buildDefaultConnectionProfile(Settings.builder().put("node.data", false).build()); + profile = ConnectionProfile.buildDefaultConnectionProfile(Settings.builder().put("node.data", false).build()); assertEquals(11, profile.getNumConnections()); assertEquals(1, profile.getNumConnectionsPerType(TransportRequestOptions.Type.PING)); assertEquals(6, profile.getNumConnectionsPerType(TransportRequestOptions.Type.REG)); @@ -120,7 +120,7 @@ public void testDefaultConnectionProfile() { assertEquals(0, profile.getNumConnectionsPerType(TransportRequestOptions.Type.RECOVERY)); assertEquals(3, profile.getNumConnectionsPerType(TransportRequestOptions.Type.BULK)); - profile = ConnectionManager.buildDefaultConnectionProfile(Settings.builder().put("node.data", false) + profile = ConnectionProfile.buildDefaultConnectionProfile(Settings.builder().put("node.data", false) .put("node.master", false).build()); assertEquals(10, profile.getNumConnections()); assertEquals(1, profile.getNumConnectionsPerType(TransportRequestOptions.Type.PING)); diff --git a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java index c6fb1f406cf53..a17103789f251 100644 --- 
a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java @@ -156,27 +156,29 @@ public void testEnsureVersionCompatibility() { TcpTransport.ensureVersionCompatibility(VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT), Version.CURRENT, randomBoolean()); - TcpTransport.ensureVersionCompatibility(Version.fromString("6.0.0"), Version.fromString("7.0.0"), true); + final Version version = Version.fromString("7.0.0"); + TcpTransport.ensureVersionCompatibility(Version.fromString("6.0.0"), version, true); IllegalStateException ise = expectThrows(IllegalStateException.class, () -> - TcpTransport.ensureVersionCompatibility(Version.fromString("6.0.0"), Version.fromString("7.0.0"), false)); - assertEquals("Received message from unsupported version: [6.0.0] minimal compatible version is: [6.5.0]", ise.getMessage()); + TcpTransport.ensureVersionCompatibility(Version.fromString("6.0.0"), version, false)); + assertEquals("Received message from unsupported version: [6.0.0] minimal compatible version is: [" + + version.minimumCompatibilityVersion() + "]", ise.getMessage()); // For handshake we are compatible with N-2 - TcpTransport.ensureVersionCompatibility(Version.fromString("5.6.0"), Version.fromString("7.0.0"), true); + TcpTransport.ensureVersionCompatibility(Version.fromString("5.6.0"), version, true); ise = expectThrows(IllegalStateException.class, () -> - TcpTransport.ensureVersionCompatibility(Version.fromString("5.6.0"), Version.fromString("7.0.0"), false)); - assertEquals("Received message from unsupported version: [5.6.0] minimal compatible version is: [6.5.0]", - ise.getMessage()); + TcpTransport.ensureVersionCompatibility(Version.fromString("5.6.0"), version, false)); + assertEquals("Received message from unsupported version: [5.6.0] minimal compatible version is: [" + + version.minimumCompatibilityVersion() + "]", 
ise.getMessage()); ise = expectThrows(IllegalStateException.class, () -> - TcpTransport.ensureVersionCompatibility(Version.fromString("2.3.0"), Version.fromString("7.0.0"), true)); - assertEquals("Received handshake message from unsupported version: [2.3.0] minimal compatible version is: [6.5.0]", - ise.getMessage()); + TcpTransport.ensureVersionCompatibility(Version.fromString("2.3.0"), version, true)); + assertEquals("Received handshake message from unsupported version: [2.3.0] minimal compatible version is: [" + + version.minimumCompatibilityVersion() + "]", ise.getMessage()); ise = expectThrows(IllegalStateException.class, () -> - TcpTransport.ensureVersionCompatibility(Version.fromString("2.3.0"), Version.fromString("7.0.0"), false)); - assertEquals("Received message from unsupported version: [2.3.0] minimal compatible version is: [6.5.0]", - ise.getMessage()); + TcpTransport.ensureVersionCompatibility(Version.fromString("2.3.0"), version, false)); + assertEquals("Received message from unsupported version: [2.3.0] minimal compatible version is: [" + + version.minimumCompatibilityVersion() + "]", ise.getMessage()); } public void testCompressRequest() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java b/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java index 9f2b60c6901f1..ac58b0e25b91e 100644 --- a/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.transport; import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.stats.ClusterStatsAction; import org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequest; @@ -44,12 +45,12 @@ public class TransportLoggerTests extends ESTestCase { public void setUp() throws Exception { 
super.setUp(); appender = new MockLogAppender(); - Loggers.addAppender(Loggers.getLogger(TransportLogger.class), appender); + Loggers.addAppender(LogManager.getLogger(TransportLogger.class), appender); appender.start(); } public void tearDown() throws Exception { - Loggers.removeAppender(Loggers.getLogger(TransportLogger.class), appender); + Loggers.removeAppender(LogManager.getLogger(TransportLogger.class), appender); appender.stop(); super.tearDown(); } diff --git a/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java b/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java index 9ef47af29cd94..2ef817e98ba75 100644 --- a/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java +++ b/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java @@ -151,7 +151,7 @@ public void testValidateEmptyCluster() { client().admin().indices().prepareValidateQuery().get(); fail("Expected IndexNotFoundException"); } catch (IndexNotFoundException e) { - assertThat(e.getMessage(), is("no such index")); + assertThat(e.getMessage(), is("no such index [null]")); } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index c4881d06351be..60a7655e9ed46 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -79,6 +79,7 @@ import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; import java.util.Iterator; @@ -295,6 +296,9 @@ assert shardRoutings().stream() .filter(shardRouting -> shardRouting.isSameAllocation(replica.routingEntry())).findFirst().isPresent() == false : "replica 
with aId [" + replica.routingEntry().allocationId() + "] already exists"; replicas.add(replica); + if (replicationTargets != null) { + replicationTargets.addReplica(replica); + } updateAllocationIDsOnPrimary(); } @@ -310,6 +314,9 @@ public synchronized IndexShard addReplicaWithExistingPath(final ShardPath shardP newShard(shardRouting, shardPath, indexMetaData, null, null, getEngineFactory(shardRouting), () -> {}, EMPTY_EVENT_LISTENER); replicas.add(newReplica); + if (replicationTargets != null) { + replicationTargets.addReplica(newReplica); + } updateAllocationIDsOnPrimary(); return newReplica; } @@ -496,7 +503,7 @@ private void updateAllocationIDsOnPrimary() throws IOException { } private synchronized void computeReplicationTargets() { - this.replicationTargets = new ReplicationTargets(primary, replicas); + this.replicationTargets = new ReplicationTargets(this.primary, new ArrayList<>(this.replicas)); } private synchronized ReplicationTargets getReplicationTargets() { @@ -510,7 +517,25 @@ static final class ReplicationTargets { ReplicationTargets(IndexShard primary, List replicas) { this.primary = primary; - this.replicas = Collections.unmodifiableList(replicas); + this.replicas = replicas; + } + + /** + * This does not modify the replication targets, but only adds a replica to the list. + * If the targets is updated to include the given replica, a replication action would + * be able to find this replica to execute write requests on it. 
+ */ + synchronized void addReplica(IndexShard replica) { + replicas.add(replica); + } + + synchronized IndexShard findReplicaShard(ShardRouting replicaRouting) { + for (IndexShard replica : replicas) { + if (replica.routingEntry().isSameAllocation(replicaRouting)) { + return replica; + } + } + throw new AssertionError("replica [" + replicaRouting + "] is not found; replicas[" + replicas + "] primary[" + primary + "]"); } } @@ -614,8 +639,7 @@ public void performOn( final long globalCheckpoint, final long maxSeqNoOfUpdatesOrDeletes, final ActionListener listener) { - IndexShard replica = replicationTargets.replicas.stream() - .filter(s -> replicaRouting.isSameAllocation(s.routingEntry())).findFirst().get(); + IndexShard replica = replicationTargets.findReplicaShard(replicaRouting); replica.acquireReplicaOperationPermit( getPrimaryShard().getPendingPrimaryTerm(), globalCheckpoint, diff --git a/test/framework/src/main/java/org/elasticsearch/index/store/EsBaseDirectoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/store/EsBaseDirectoryTestCase.java index c078e88da20ee..771188341a720 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/store/EsBaseDirectoryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/store/EsBaseDirectoryTestCase.java @@ -21,12 +21,13 @@ import com.carrotsearch.randomizedtesting.annotations.Listeners; import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; + +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.store.BaseDirectoryTestCase; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TimeUnits; import org.elasticsearch.bootstrap.BootstrapForTesting; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter; /** @@ -48,6 +49,6 @@ public abstract class EsBaseDirectoryTestCase extends BaseDirectoryTestCase { 
BootstrapForTesting.ensureInitialized(); } - protected final Logger logger = Loggers.getLogger(getClass()); + protected final Logger logger = LogManager.getLogger(getClass()); } diff --git a/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java index 3c4c0da6322fb..0379012d2b882 100644 --- a/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java +++ b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java @@ -25,8 +25,8 @@ import org.elasticsearch.index.similarity.ScriptedSimilarity.Field; import org.elasticsearch.index.similarity.ScriptedSimilarity.Query; import org.elasticsearch.index.similarity.ScriptedSimilarity.Term; -import org.elasticsearch.search.aggregations.pipeline.movfn.MovingFunctionScript; -import org.elasticsearch.search.aggregations.pipeline.movfn.MovingFunctions; +import org.elasticsearch.search.aggregations.pipeline.MovingFunctionScript; +import org.elasticsearch.search.aggregations.pipeline.MovingFunctions; import org.elasticsearch.search.lookup.LeafSearchLookup; import org.elasticsearch.search.lookup.SearchLookup; @@ -90,8 +90,16 @@ public T compile(String name, String source, ScriptContext context, Map + ctx -> new FieldScript(parameters, lookup, ctx) { + @Override + public Object execute() { + Map vars = createVars(parameters); + vars.putAll(getLeafLookup().asMap()); + return script.apply(vars); + } + }; return context.factoryClazz.cast(factory); } else if (context.instanceClazz.equals(FieldScript.class)) { FieldScript.Factory factory = (parameters, lookup) -> @@ -311,20 +319,6 @@ public String getName() { return name; } - public SearchScript.LeafFactory createSearchScript(Map params, SearchLookup lookup) { - Map context = new HashMap<>(); - if (options != null) { - context.putAll(options); // TODO: remove this once scripts know to look for options under options key - context.put("options", options); - } 
- if (params != null) { - context.putAll(params); // TODO: remove this once scripts know to look for params under params key - context.put("params", params); - } - return new MockSearchScript(lookup, context, script != null ? script : ctx -> source); - } - - public FilterScript.LeafFactory createFilterScript(Map params, SearchLookup lookup) { return new MockFilterScript(lookup, params, script); } @@ -361,62 +355,6 @@ public ScriptedMetricAggContexts.ReduceScript createMetricAggReduceScript(Map, Object> script; - private final Map vars; - private final SearchLookup lookup; - - public MockSearchScript(SearchLookup lookup, Map vars, Function, Object> script) { - this.lookup = lookup; - this.vars = vars; - this.script = script; - } - - @Override - public SearchScript newInstance(LeafReaderContext context) throws IOException { - LeafSearchLookup leafLookup = lookup.getLeafSearchLookup(context); - - Map ctx = new HashMap<>(leafLookup.asMap()); - if (vars != null) { - ctx.putAll(vars); - } - - return new SearchScript(vars, lookup, context) { - @Override - public Object run() { - return script.apply(ctx); - } - - @Override - public double runAsDouble() { - return ((Number) run()).doubleValue(); - } - - @Override - public void setNextVar(String name, Object value) { - ctx.put(name, value); - } - - @Override - public void setScorer(Scorable scorer) { - ctx.put("_score", new ScoreAccessor(scorer)); - } - - @Override - public void setDocument(int doc) { - leafLookup.setDocument(doc); - } - }; - } - - @Override - public boolean needs_score() { - return true; - } - } - - public static class MockFilterScript implements FilterScript.LeafFactory { private final Function, Object> script; @@ -602,10 +540,10 @@ public double execute(Map params, double[] values) { public class MockScoreScript implements ScoreScript.Factory { - private final Function, Object> scripts; + private final Function, Object> script; - MockScoreScript(Function, Object> scripts) { - this.scripts = scripts; + 
public MockScoreScript(Function, Object> script) { + this.script = script; } @Override @@ -627,7 +565,7 @@ public double execute() { if (scorerHolder[0] != null) { vars.put("_score", new ScoreAccessor(scorerHolder[0])); } - return ((Number) scripts.apply(vars)).doubleValue(); + return ((Number) script.apply(vars)).doubleValue(); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java b/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java index f60fa610fc4af..198c02829b171 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java +++ b/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java @@ -20,6 +20,8 @@ import com.carrotsearch.randomizedtesting.RandomizedTest; import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomStrings; + +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; @@ -29,7 +31,6 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.client.Client; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -50,7 +51,7 @@ public class BackgroundIndexer implements AutoCloseable { - private final Logger logger = Loggers.getLogger(getClass()); + private final Logger logger = LogManager.getLogger(getClass()); final Thread[] writers; final CountDownLatch stopLatch; diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 82d2c2302e6f1..4b32745b62a9c 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -249,7 +249,7 @@ private static void setTestSysProps() { System.setProperty("io.netty.leakDetection.level", "paranoid"); } - protected final Logger logger = Loggers.getLogger(getClass()); + protected final Logger logger = LogManager.getLogger(getClass()); private ThreadContext threadContext; // ----------------------------------------------------------------- diff --git a/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java index 6496894baad17..16533c5c4dec8 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java @@ -19,6 +19,7 @@ package org.elasticsearch.test; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; @@ -28,7 +29,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; @@ -57,7 +57,7 @@ */ public final class ExternalTestCluster extends TestCluster { - private static final Logger logger = Loggers.getLogger(ExternalTestCluster.class); + private static final Logger logger = LogManager.getLogger(ExternalTestCluster.class); private static final AtomicInteger counter = new AtomicInteger(); public static final String EXTERNAL_CLUSTER_PREFIX = "external_"; diff --git 
a/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java index facbc6ec84b76..fad2b4e1dff29 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java @@ -120,16 +120,16 @@ import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue; import org.elasticsearch.search.aggregations.pipeline.ParsedSimpleValue; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.InternalBucketMetricValue; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.ParsedBucketMetricValue; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.percentile.ParsedPercentilesBucket; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.percentile.PercentilesBucketPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.ParsedStatsBucket; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.StatsBucketPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.extended.ExtendedStatsBucketPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.extended.ParsedExtendedStatsBucket; -import org.elasticsearch.search.aggregations.pipeline.derivative.DerivativePipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.derivative.ParsedDerivative; +import org.elasticsearch.search.aggregations.pipeline.InternalBucketMetricValue; +import org.elasticsearch.search.aggregations.pipeline.ParsedBucketMetricValue; +import org.elasticsearch.search.aggregations.pipeline.ParsedPercentilesBucket; +import 
org.elasticsearch.search.aggregations.pipeline.PercentilesBucketPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.ParsedStatsBucket; +import org.elasticsearch.search.aggregations.pipeline.StatsBucketPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.ExtendedStatsBucketPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.ParsedExtendedStatsBucket; +import org.elasticsearch.search.aggregations.pipeline.DerivativePipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.ParsedDerivative; import java.io.IOException; import java.util.ArrayList; diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index ce7e636941296..de4226bf2755b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -28,6 +28,7 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomStrings; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.ElasticsearchException; @@ -57,7 +58,6 @@ import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.SecureSettings; @@ -174,7 +174,7 @@ */ public final class InternalTestCluster extends TestCluster { - private final Logger logger = Loggers.getLogger(getClass()); + private final Logger logger = 
LogManager.getLogger(getClass()); public static final int DEFAULT_LOW_NUM_MASTER_NODES = 1; public static final int DEFAULT_HIGH_NUM_MASTER_NODES = 3; diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java index a11b70bfa104e..b5aa26a38549e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java @@ -20,6 +20,8 @@ package org.elasticsearch.test; import com.carrotsearch.hppc.ObjectArrayList; + +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; @@ -27,7 +29,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.indices.IndexTemplateMissingException; import org.elasticsearch.repositories.RepositoryMissingException; @@ -46,7 +47,7 @@ */ public abstract class TestCluster implements Closeable { - protected final Logger logger = Loggers.getLogger(getClass()); + protected final Logger logger = LogManager.getLogger(getClass()); private final long seed; protected Random random; diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruption.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruption.java index 8054847b642af..d620e7633f287 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruption.java +++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruption.java @@ -20,11 +20,12 @@ package 
org.elasticsearch.test.disruption; import com.carrotsearch.randomizedtesting.generators.RandomPicks; + +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.NodeConnectionsService; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.test.InternalTestCluster; @@ -48,7 +49,7 @@ */ public class NetworkDisruption implements ServiceDisruptionScheme { - private final Logger logger = Loggers.getLogger(NetworkDisruption.class); + private final Logger logger = LogManager.getLogger(NetworkDisruption.class); private final DisruptedLinks disruptedLinks; private final NetworkLinkDisruptionType networkLinkDisruptionType; diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java index a22994cfa9bc4..69df45958947b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java +++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java @@ -18,8 +18,8 @@ */ package org.elasticsearch.test.disruption; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.test.InternalTestCluster; import java.util.Random; @@ -28,7 +28,7 @@ public abstract class SingleNodeDisruption implements ServiceDisruptionScheme { - protected final Logger logger = Loggers.getLogger(getClass()); + protected final Logger logger = LogManager.getLogger(getClass()); protected volatile String disruptedNode; protected volatile InternalTestCluster cluster; diff --git 
a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java index 182038d5b0a2f..52b086db338f3 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.test.engine; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.index.AssertingDirectoryReader; import org.apache.lucene.index.DirectoryReader; @@ -28,7 +29,6 @@ import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -66,7 +66,7 @@ public final class MockEngineSupport { private final AtomicBoolean closing = new AtomicBoolean(false); - private final Logger logger = Loggers.getLogger(Engine.class); + private final Logger logger = LogManager.getLogger(Engine.class); private final ShardId shardId; private final QueryCache filterCache; private final QueryCachingPolicy filterCachingPolicy; diff --git a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java index cddcca59e6cf5..58e126b4bed4a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java +++ b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java @@ -19,11 +19,12 @@ package org.elasticsearch.test.junit.listeners; import com.carrotsearch.randomizedtesting.ReproduceErrorMessageBuilder; + +import 
org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; import org.elasticsearch.common.Strings; import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; @@ -47,7 +48,7 @@ */ public class ReproduceInfoPrinter extends RunListener { - protected final Logger logger = Loggers.getLogger(ESTestCase.class); + protected final Logger logger = LogManager.getLogger(ESTestCase.class); @Override public void testStarted(Description description) throws Exception { diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index 91d70b260fe80..a540af459a1a6 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -26,6 +26,7 @@ import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy; import org.apache.http.ssl.SSLContexts; import org.apache.http.util.EntityUtils; +import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -73,6 +74,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.TreeSet; import java.util.concurrent.TimeUnit; import java.util.function.Predicate; @@ -105,25 +107,13 @@ public static Map entityAsMap(Response response) throws IOExcept } /** - * Does the cluster being tested have xpack installed? + * Does any node in the cluster being tested have x-pack installed? 
*/ public static boolean hasXPack() throws IOException { - RestClient client = adminClient(); - if (client == null) { + if (hasXPack == null) { throw new IllegalStateException("must be called inside of a rest test case test"); } - Map response = entityAsMap(client.performRequest(new Request("GET", "_nodes/plugins"))); - Map nodes = (Map) response.get("nodes"); - for (Map.Entry node : nodes.entrySet()) { - Map nodeInfo = (Map) node.getValue(); - for (Object module: (List) nodeInfo.get("modules")) { - Map moduleInfo = (Map) module; - if (moduleInfo.get("name").toString().startsWith("x-pack-")) { - return true; - } - } - } - return false; + return hasXPack; } private static List clusterHosts; @@ -136,12 +126,16 @@ public static boolean hasXPack() throws IOException { * completes */ private static RestClient adminClient; + private static Boolean hasXPack; + private static TreeSet nodeVersions; @Before public void initClient() throws IOException { if (client == null) { assert adminClient == null; assert clusterHosts == null; + assert hasXPack == null; + assert nodeVersions == null; String cluster = System.getProperty("tests.rest.cluster"); if (cluster == null) { throw new RuntimeException("Must specify [tests.rest.cluster] system property with a comma delimited list of [host:port] " @@ -162,10 +156,27 @@ public void initClient() throws IOException { logger.info("initializing REST clients against {}", clusterHosts); client = buildClient(restClientSettings(), clusterHosts.toArray(new HttpHost[clusterHosts.size()])); adminClient = buildClient(restAdminSettings(), clusterHosts.toArray(new HttpHost[clusterHosts.size()])); + + hasXPack = false; + nodeVersions = new TreeSet<>(); + Map response = entityAsMap(adminClient.performRequest(new Request("GET", "_nodes/plugins"))); + Map nodes = (Map) response.get("nodes"); + for (Map.Entry node : nodes.entrySet()) { + Map nodeInfo = (Map) node.getValue(); + nodeVersions.add(Version.fromString(nodeInfo.get("version").toString())); + 
for (Object module: (List) nodeInfo.get("modules")) { + Map moduleInfo = (Map) module; + if (moduleInfo.get("name").toString().startsWith("x-pack-")) { + hasXPack = true; + } + } + } } assert client != null; assert adminClient != null; assert clusterHosts != null; + assert hasXPack != null; + assert nodeVersions != null; } /** @@ -195,6 +206,8 @@ public static void closeClients() throws IOException { clusterHosts = null; client = null; adminClient = null; + hasXPack = null; + nodeVersions = null; } } @@ -335,8 +348,6 @@ protected boolean preserveRollupJobsUponCompletion() { } private void wipeCluster() throws Exception { - boolean hasXPack = hasXPack(); - if (preserveIndicesUponCompletion() == false) { // wipe indices try { @@ -577,9 +588,18 @@ protected String getProtocol() { protected RestClient buildClient(Settings settings, HttpHost[] hosts) throws IOException { RestClientBuilder builder = RestClient.builder(hosts); configureClient(builder, settings); + builder.setStrictDeprecationMode(getStrictDeprecationMode()); return builder.build(); } + /** + * Whether the used REST client should return any response containing at + * least one warning header as a failure. 
+ */ + protected boolean getStrictDeprecationMode() { + return true; + } + protected static void configureClient(RestClientBuilder builder, Settings settings) throws IOException { String keystorePath = settings.get(TRUSTSTORE_PATH); if (keystorePath != null) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java index 856fd2a32ded4..6b400bfb35163 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java @@ -19,11 +19,13 @@ package org.elasticsearch.test.rest.yaml; import com.carrotsearch.randomizedtesting.RandomizedTest; + import org.apache.http.HttpEntity; import org.apache.http.HttpHost; import org.apache.http.client.methods.HttpGet; import org.apache.http.entity.ContentType; import org.apache.http.util.EntityUtils; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.client.NodeSelector; @@ -34,7 +36,6 @@ import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClientBuilder; import org.elasticsearch.common.CheckedSupplier; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestApi; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestPath; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec; @@ -57,7 +58,7 @@ * REST calls. 
*/ public class ClientYamlTestClient implements Closeable { - private static final Logger logger = Loggers.getLogger(ClientYamlTestClient.class); + private static final Logger logger = LogManager.getLogger(ClientYamlTestClient.class); private static final ContentType YAML_CONTENT_TYPE = ContentType.create("application/yaml"); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java index 4061b627cd816..b1337172a5679 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java @@ -19,15 +19,16 @@ package org.elasticsearch.test.rest.yaml; import com.carrotsearch.randomizedtesting.RandomizedTest; + import org.apache.http.HttpEntity; import org.apache.http.entity.ByteArrayEntity; import org.apache.http.entity.ContentType; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; import org.elasticsearch.client.NodeSelector; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; @@ -47,7 +48,7 @@ */ public class ClientYamlTestExecutionContext { - private static final Logger logger = Loggers.getLogger(ClientYamlTestExecutionContext.class); + private static final Logger logger = LogManager.getLogger(ClientYamlTestExecutionContext.class); private static final XContentType[] STREAMING_CONTENT_TYPES = new XContentType[]{XContentType.JSON, XContentType.SMILE}; diff --git 
a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index f76c5423534d7..011da53384d5d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -408,4 +408,9 @@ protected final RestClientBuilder getClientBuilderWithSniffedHosts() throws IOEx configureClient(builder, restClientSettings()); return builder; } + + @Override + protected boolean getStrictDeprecationMode() { + return false; + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java index cfce0653d31c2..d3fb500ac051a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java @@ -46,7 +46,9 @@ public final class Features { "stash_in_path", "stash_path_replace", "warnings", - "yaml")); + "yaml", + "contains" + )); private Features() { diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Stash.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Stash.java index a5edeb0195b53..6ac78bf6bcfcd 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Stash.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Stash.java @@ -19,9 +19,9 @@ package org.elasticsearch.test.rest.yaml; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -42,7 +42,7 @@ public class Stash implements ToXContentFragment 
{ private static final Pattern EXTENDED_KEY = Pattern.compile("\\$\\{([^}]+)\\}"); private static final Pattern PATH = Pattern.compile("\\$_path"); - private static final Logger logger = Loggers.getLogger(Stash.class); + private static final Logger logger = LogManager.getLogger(Stash.class); public static final Stash EMPTY = new Stash(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ContainsAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ContainsAssertion.java index 9d2d91790c7c2..3241149dfae72 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ContainsAssertion.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ContainsAssertion.java @@ -18,9 +18,9 @@ */ package org.elasticsearch.test.rest.yaml.section; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentParser; @@ -43,7 +43,7 @@ public static ContainsAssertion parse(XContentParser parser) throws IOException return new ContainsAssertion(location, stringObjectTuple.v1(), stringObjectTuple.v2()); } - private static final Logger logger = Loggers.getLogger(ContainsAssertion.class); + private static final Logger logger = LogManager.getLogger(ContainsAssertion.class); public ContainsAssertion(XContentLocation location, String field, Object expectedValue) { super(location, field, expectedValue); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java index e1346d3f6967d..5fb5c1d003dd4 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java +++ 
b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java @@ -19,6 +19,7 @@ package org.elasticsearch.test.rest.yaml.section; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.client.HasAttributeNodeSelector; @@ -28,7 +29,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentLocation; @@ -106,6 +106,8 @@ public static DoSection parse(XContentParser parser) throws IOException { } else if (token.isValue()) { if ("catch".equals(currentFieldName)) { doSection.setCatch(parser.text()); + } else { + throw new ParsingException(parser.getTokenLocation(), "unsupported field [" + currentFieldName + "]"); } } else if (token == XContentParser.Token.START_ARRAY) { if ("warnings".equals(currentFieldName)) { @@ -181,7 +183,7 @@ public static DoSection parse(XContentParser parser) throws IOException { } - private static final Logger logger = Loggers.getLogger(DoSection.class); + private static final Logger logger = LogManager.getLogger(DoSection.class); private final XContentLocation location; private String catchParam; diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/GreaterThanAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/GreaterThanAssertion.java index 494d65e05de71..f3518bedd0266 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/GreaterThanAssertion.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/GreaterThanAssertion.java @@ -18,9 +18,9 @@ */ package org.elasticsearch.test.rest.yaml.section; +import 
org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentParser; @@ -47,7 +47,7 @@ public static GreaterThanAssertion parse(XContentParser parser) throws IOExcepti return new GreaterThanAssertion(location, stringObjectTuple.v1(), stringObjectTuple.v2()); } - private static final Logger logger = Loggers.getLogger(GreaterThanAssertion.class); + private static final Logger logger = LogManager.getLogger(GreaterThanAssertion.class); public GreaterThanAssertion(XContentLocation location, String field, Object expectedValue) { super(location, field, expectedValue); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java index 3fd9bf7adfd61..e35fc4450509a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java @@ -19,9 +19,9 @@ package org.elasticsearch.test.rest.yaml.section; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentParser; @@ -48,7 +48,7 @@ public static GreaterThanEqualToAssertion parse(XContentParser parser) throws IO return new GreaterThanEqualToAssertion(location, stringObjectTuple.v1(), stringObjectTuple.v2()); } - private static final Logger logger = Loggers.getLogger(GreaterThanEqualToAssertion.class); + private static final Logger logger = 
LogManager.getLogger(GreaterThanEqualToAssertion.class); public GreaterThanEqualToAssertion(XContentLocation location, String field, Object expectedValue) { super(location, field, expectedValue); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/IsFalseAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/IsFalseAssertion.java index 56ee603c70f75..276e4e02291ed 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/IsFalseAssertion.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/IsFalseAssertion.java @@ -18,8 +18,8 @@ */ package org.elasticsearch.test.rest.yaml.section; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentParser; @@ -41,7 +41,7 @@ public static IsFalseAssertion parse(XContentParser parser) throws IOException { return new IsFalseAssertion(parser.getTokenLocation(), ParserUtils.parseField(parser)); } - private static final Logger logger = Loggers.getLogger(IsFalseAssertion.class); + private static final Logger logger = LogManager.getLogger(IsFalseAssertion.class); public IsFalseAssertion(XContentLocation location, String field) { super(location, field, false); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/IsTrueAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/IsTrueAssertion.java index 9b3f37e1f52a0..b4cbe3c496f2d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/IsTrueAssertion.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/IsTrueAssertion.java @@ -18,8 +18,8 @@ */ package org.elasticsearch.test.rest.yaml.section; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; 
-import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentParser; @@ -42,7 +42,7 @@ public static IsTrueAssertion parse(XContentParser parser) throws IOException { return new IsTrueAssertion(parser.getTokenLocation(), ParserUtils.parseField(parser)); } - private static final Logger logger = Loggers.getLogger(IsTrueAssertion.class); + private static final Logger logger = LogManager.getLogger(IsTrueAssertion.class); public IsTrueAssertion(XContentLocation location, String field) { super(location, field, true); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LengthAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LengthAssertion.java index aeecc50b90d41..cad415b40f9d0 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LengthAssertion.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LengthAssertion.java @@ -18,9 +18,9 @@ */ package org.elasticsearch.test.rest.yaml.section; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentParser; @@ -55,7 +55,7 @@ public static LengthAssertion parse(XContentParser parser) throws IOException { return new LengthAssertion(location, stringObjectTuple.v1(), value); } - private static final Logger logger = Loggers.getLogger(LengthAssertion.class); + private static final Logger logger = LogManager.getLogger(LengthAssertion.class); public LengthAssertion(XContentLocation location, String field, Object expectedValue) { super(location, field, expectedValue); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LessThanAssertion.java 
b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LessThanAssertion.java index 75a1edcf81caa..84461202df4bb 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LessThanAssertion.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LessThanAssertion.java @@ -18,9 +18,9 @@ */ package org.elasticsearch.test.rest.yaml.section; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentParser; @@ -48,7 +48,7 @@ public static LessThanAssertion parse(XContentParser parser) throws IOException return new LessThanAssertion(location, stringObjectTuple.v1(), stringObjectTuple.v2()); } - private static final Logger logger = Loggers.getLogger(LessThanAssertion.class); + private static final Logger logger = LogManager.getLogger(LessThanAssertion.class); public LessThanAssertion(XContentLocation location, String field, Object expectedValue) { super(location, field, expectedValue); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java index 23b6a1e4efc41..0b11b304ce60c 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java @@ -19,9 +19,9 @@ package org.elasticsearch.test.rest.yaml.section; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentLocation; import 
org.elasticsearch.common.xcontent.XContentParser; @@ -48,7 +48,7 @@ public static LessThanOrEqualToAssertion parse(XContentParser parser) throws IOE return new LessThanOrEqualToAssertion(location, stringObjectTuple.v1(), stringObjectTuple.v2()); } - private static final Logger logger = Loggers.getLogger(LessThanOrEqualToAssertion.class); + private static final Logger logger = LogManager.getLogger(LessThanOrEqualToAssertion.class); public LessThanOrEqualToAssertion(XContentLocation location, String field, Object expectedValue) { super(location, field, expectedValue); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/MatchAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/MatchAssertion.java index 6ecaae75a8ee2..09f88f42492b6 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/MatchAssertion.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/MatchAssertion.java @@ -18,9 +18,9 @@ */ package org.elasticsearch.test.rest.yaml.section; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.NotEqualMessageBuilder; @@ -48,7 +48,7 @@ public static MatchAssertion parse(XContentParser parser) throws IOException { return new MatchAssertion(location, stringObjectTuple.v1(), stringObjectTuple.v2()); } - private static final Logger logger = Loggers.getLogger(MatchAssertion.class); + private static final Logger logger = LogManager.getLogger(MatchAssertion.class); public MatchAssertion(XContentLocation location, String field, Object expectedValue) { super(location, field, expectedValue); diff --git 
a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 3b64f00084ec8..85a654c4cac36 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -116,7 +116,8 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { protected abstract MockTransportService build(Settings settings, Version version, ClusterSettings clusterSettings, boolean doHandshake); protected int channelsPerNodeConnection() { - return 13; + // This is a customized profile for this test case. + return 6; } @Override @@ -125,9 +126,17 @@ public void setUp() throws Exception { super.setUp(); threadPool = new TestThreadPool(getClass().getName()); clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - serviceA = buildService("TS_A", version0, clusterSettings); // this one supports dynamic tracer updates + Settings connectionSettings = Settings.builder() + .put(TransportService.CONNECTIONS_PER_NODE_RECOVERY.getKey(), 1) + .put(TransportService.CONNECTIONS_PER_NODE_BULK.getKey(), 1) + .put(TransportService.CONNECTIONS_PER_NODE_REG.getKey(), 2) + .put(TransportService.CONNECTIONS_PER_NODE_STATE.getKey(), 1) + .put(TransportService.CONNECTIONS_PER_NODE_PING.getKey(), 1) + .build(); + + serviceA = buildService("TS_A", version0, clusterSettings, connectionSettings); // this one supports dynamic tracer updates nodeA = serviceA.getLocalNode(); - serviceB = buildService("TS_B", version1, null); // this one doesn't support dynamic tracer updates + serviceB = buildService("TS_B", version1, null, connectionSettings); // this one doesn't support dynamic tracer updates nodeB = serviceB.getLocalNode(); // wait till all nodes are properly connected and the event has 
been sent, so tests in this class // will not get this callback called on the connections done in this setup @@ -174,7 +183,12 @@ private MockTransportService buildService(final String name, final Version versi } protected MockTransportService buildService(final String name, final Version version, ClusterSettings clusterSettings) { - return buildService(name, version, clusterSettings, Settings.EMPTY, true, true); + return buildService(name, version, clusterSettings, Settings.EMPTY); + } + + protected MockTransportService buildService(final String name, final Version version, ClusterSettings clusterSettings, + Settings settings) { + return buildService(name, version, clusterSettings, settings, true, true); } @Override @@ -1999,7 +2013,7 @@ protected String handleRequest(TcpChannel mockChannel, String profileName, Strea assertEquals("handshake failed", exception.getCause().getMessage()); } - ConnectionProfile connectionProfile = ConnectionManager.buildDefaultConnectionProfile(Settings.EMPTY); + ConnectionProfile connectionProfile = ConnectionProfile.buildDefaultConnectionProfile(Settings.EMPTY); try (TransportService service = buildService("TS_TPC", Version.CURRENT, null); TcpTransport.NodeChannels connection = originalTransport.openConnection( new DiscoveryNode("TS_TPC", "TS_TPC", service.boundAddress().publishAddress(), emptyMap(), emptySet(), version0), diff --git a/test/framework/src/main/java/org/elasticsearch/upgrades/AbstractFullClusterRestartTestCase.java b/test/framework/src/main/java/org/elasticsearch/upgrades/AbstractFullClusterRestartTestCase.java index 861d574b3465e..b61182415eeb4 100644 --- a/test/framework/src/main/java/org/elasticsearch/upgrades/AbstractFullClusterRestartTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/upgrades/AbstractFullClusterRestartTestCase.java @@ -20,13 +20,43 @@ package org.elasticsearch.upgrades; import org.elasticsearch.Version; +import org.elasticsearch.client.Request; import 
org.elasticsearch.common.Booleans; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.test.rest.ESRestTestCase; +import org.junit.Before; + +import java.io.IOException; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.equalTo; public abstract class AbstractFullClusterRestartTestCase extends ESRestTestCase { private final boolean runningAgainstOldCluster = Booleans.parseBoolean(System.getProperty("tests.is_old_cluster")); + @Before + public void init() throws IOException { + assertThat("we don't need this branch if we aren't compatible with 6.0", + Version.CURRENT.minimumIndexCompatibilityVersion().onOrBefore(Version.V_6_0_0), equalTo(true)); + if (isRunningAgainstOldCluster() && getOldClusterVersion().before(Version.V_7_0_0_alpha1)) { + XContentBuilder template = jsonBuilder(); + template.startObject(); + { + template.field("index_patterns", "*"); + template.field("order", "0"); + template.startObject("settings"); + template.field("number_of_shards", 5); + template.endObject(); + } + template.endObject(); + Request createTemplate = new Request("PUT", "/_template/template"); + createTemplate.setJsonEntity(Strings.toString(template)); + client().performRequest(createTemplate); + } + } + public final boolean isRunningAgainstOldCluster() { return runningAgainstOldCluster; } diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java index 777ff74611885..0af7fe6845026 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.Version; import org.elasticsearch.client.Node; import org.elasticsearch.client.NodeSelector; 
+import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentParser; @@ -52,7 +53,7 @@ public class DoSectionTests extends AbstractClientYamlTestFragmentParserTestCase { - public void testWarningHeaders() throws IOException { + public void testWarningHeaders() { { final DoSection section = new DoSection(new XContentLocation(1, 1)); @@ -358,6 +359,17 @@ public void testParseDoSectionWithCatch() throws Exception { assertThat(doSection.getApiCallSection().hasBody(), equalTo(false)); } + public void testUnsupportedTopLevelField() throws Exception { + parser = createParser(YamlXContent.yamlXContent, + "max_concurrent_shard_requests: 1" + ); + + ParsingException e = expectThrows(ParsingException.class, () -> DoSection.parse(parser)); + assertThat(e.getMessage(), is("unsupported field [max_concurrent_shard_requests]")); + parser.nextToken(); + parser.nextToken(); + } + public void testParseDoSectionWithHeaders() throws Exception { parser = createParser(YamlXContent.yamlXContent, "headers:\n" + diff --git a/test/framework/src/test/java/org/elasticsearch/transport/MockTcpTransportTests.java b/test/framework/src/test/java/org/elasticsearch/transport/MockTcpTransportTests.java index 4084d08b2e806..e8b5f38b88df1 100644 --- a/test/framework/src/test/java/org/elasticsearch/transport/MockTcpTransportTests.java +++ b/test/framework/src/test/java/org/elasticsearch/transport/MockTcpTransportTests.java @@ -50,7 +50,7 @@ public Version executeHandshake(DiscoveryNode node, TcpChannel mockChannel, Time } }; MockTransportService mockTransportService = - MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, clusterSettings, Collections.emptySet()); + MockTransportService.createNewService(settings, transport, version, threadPool, clusterSettings, Collections.emptySet()); mockTransportService.start(); 
return mockTransportService; } diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/SimpleMockNioTransportTests.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/SimpleMockNioTransportTests.java index bebe50752f4e1..10f089e855a5d 100644 --- a/test/framework/src/test/java/org/elasticsearch/transport/nio/SimpleMockNioTransportTests.java +++ b/test/framework/src/test/java/org/elasticsearch/transport/nio/SimpleMockNioTransportTests.java @@ -78,7 +78,7 @@ protected Version getCurrentVersion() { }; MockTransportService mockTransportService = - MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, clusterSettings, Collections.emptySet()); + MockTransportService.createNewService(settings, transport, version, threadPool, clusterSettings, Collections.emptySet()); mockTransportService.start(); return mockTransportService; } diff --git a/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/KeyPairGeneratorTool.java b/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/KeyPairGeneratorTool.java index c9c2e5079338f..d6927f1f611bd 100644 --- a/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/KeyPairGeneratorTool.java +++ b/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/KeyPairGeneratorTool.java @@ -21,7 +21,6 @@ import java.security.SecureRandom; import static org.elasticsearch.license.CryptUtils.writeEncryptedPrivateKey; -import static org.elasticsearch.license.CryptUtils.writeEncryptedPublicKey; public class KeyPairGeneratorTool extends LoggingAwareCommand { @@ -65,7 +64,7 @@ protected void execute(Terminal terminal, OptionSet options) throws Exception { KeyPair keyPair = keyGen.generateKeyPair(); Files.write(privateKeyPath, writeEncryptedPrivateKey(keyPair.getPrivate())); - Files.write(publicKeyPath, writeEncryptedPublicKey(keyPair.getPublic())); + Files.write(publicKeyPath, 
keyPair.getPublic().getEncoded()); terminal.println( Terminal.Verbosity.VERBOSE, diff --git a/x-pack/plugin/ccr/build.gradle b/x-pack/plugin/ccr/build.gradle index ea8aa89777703..fe5341ef99372 100644 --- a/x-pack/plugin/ccr/build.gradle +++ b/x-pack/plugin/ccr/build.gradle @@ -1,5 +1,4 @@ import com.carrotsearch.gradle.junit4.RandomizedTestingTask -import org.elasticsearch.gradle.BuildPlugin evaluationDependsOn(xpackModule('core')) @@ -25,7 +24,6 @@ task internalClusterTest(type: RandomizedTestingTask, group: JavaBasePlugin.VERIFICATION_GROUP, description: 'Java fantasy integration tests', dependsOn: test.dependsOn) { - configure(BuildPlugin.commonTestConfig(project)) classpath = project.test.classpath testClassesDirs = project.test.testClassesDirs include '**/*IT.class' diff --git a/x-pack/plugin/ccr/qa/build.gradle b/x-pack/plugin/ccr/qa/build.gradle index dc44f8f753d2e..f408e6a78b603 100644 --- a/x-pack/plugin/ccr/qa/build.gradle +++ b/x-pack/plugin/ccr/qa/build.gradle @@ -1,5 +1,12 @@ import org.elasticsearch.gradle.test.RestIntegTestTask +apply plugin: 'elasticsearch.build' +test.enabled = false + +dependencies { + compile project(':test:framework') +} + subprojects { project.tasks.withType(RestIntegTestTask) { final File xPackResources = new File(xpackProject('plugin').projectDir, 'src/test/resources') diff --git a/x-pack/plugin/ccr/qa/chain/build.gradle b/x-pack/plugin/ccr/qa/chain/build.gradle index 7b3e20f86ceca..f93feb4a66a1b 100644 --- a/x-pack/plugin/ccr/qa/chain/build.gradle +++ b/x-pack/plugin/ccr/qa/chain/build.gradle @@ -5,6 +5,7 @@ apply plugin: 'elasticsearch.standalone-test' dependencies { testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') testCompile project(path: xpackModule('ccr'), configuration: 'runtime') + testCompile project(':x-pack:plugin:ccr:qa') } task leaderClusterTest(type: RestIntegTestTask) { diff --git a/x-pack/plugin/ccr/qa/chain/src/test/java/org/elasticsearch/xpack/ccr/ChainIT.java 
b/x-pack/plugin/ccr/qa/chain/src/test/java/org/elasticsearch/xpack/ccr/ChainIT.java index 1a8a8e0096f09..e5a37aa829bbf 100644 --- a/x-pack/plugin/ccr/qa/chain/src/test/java/org/elasticsearch/xpack/ccr/ChainIT.java +++ b/x-pack/plugin/ccr/qa/chain/src/test/java/org/elasticsearch/xpack/ccr/ChainIT.java @@ -6,34 +6,10 @@ package org.elasticsearch.xpack.ccr; -import org.apache.http.HttpHost; -import org.apache.http.util.EntityUtils; -import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.test.rest.ESRestTestCase; -import java.io.IOException; -import java.util.List; -import java.util.Map; - -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.hamcrest.Matchers.equalTo; - -public class ChainIT extends ESRestTestCase { - - private final String targetCluster = System.getProperty("tests.target_cluster"); - - @Override - protected boolean preserveClusterUponCompletion() { - return true; - } +public class ChainIT extends ESCCRRestTestCase { public void testFollowIndex() throws Exception { final int numDocs = 128; @@ -60,23 +36,23 @@ public void testFollowIndex() throws Exception { index(client(), leaderIndexName, Integer.toString(i), "field", i, "filtered_field", "true"); } refresh(leaderIndexName); - verifyDocuments(leaderIndexName, numDocs); + verifyDocuments(leaderIndexName, numDocs, "filtered_field:true"); } else if ("middle".equals(targetCluster)) { logger.info("Running against middle cluster"); followIndex("leader_cluster", leaderIndexName, middleIndexName); - assertBusy(() -> 
verifyDocuments(middleIndexName, numDocs)); + assertBusy(() -> verifyDocuments(middleIndexName, numDocs, "filtered_field:true")); try (RestClient leaderClient = buildLeaderClient()) { int id = numDocs; index(leaderClient, leaderIndexName, Integer.toString(id), "field", id, "filtered_field", "true"); index(leaderClient, leaderIndexName, Integer.toString(id + 1), "field", id + 1, "filtered_field", "true"); index(leaderClient, leaderIndexName, Integer.toString(id + 2), "field", id + 2, "filtered_field", "true"); } - assertBusy(() -> verifyDocuments(middleIndexName, numDocs + 3)); + assertBusy(() -> verifyDocuments(middleIndexName, numDocs + 3, "filtered_field:true")); } else if ("follow".equals(targetCluster)) { logger.info("Running against follow cluster"); final String followIndexName = "follow"; followIndex("middle_cluster", middleIndexName, followIndexName); - assertBusy(() -> verifyDocuments(followIndexName, numDocs + 3)); + assertBusy(() -> verifyDocuments(followIndexName, numDocs + 3, "filtered_field:true")); try (RestClient leaderClient = buildLeaderClient()) { int id = numDocs + 3; @@ -86,82 +62,13 @@ public void testFollowIndex() throws Exception { } try (RestClient middleClient = buildMiddleClient()) { - assertBusy(() -> verifyDocuments(middleIndexName, numDocs + 6, middleClient)); + assertBusy(() -> verifyDocuments(middleIndexName, numDocs + 6, "filtered_field:true", middleClient)); } - assertBusy(() -> verifyDocuments(followIndexName, numDocs + 6)); + assertBusy(() -> verifyDocuments(followIndexName, numDocs + 6, "filtered_field:true")); } else { fail("unexpected target cluster [" + targetCluster + "]"); } } - private static void index(RestClient client, String index, String id, Object... 
fields) throws IOException { - XContentBuilder document = jsonBuilder().startObject(); - for (int i = 0; i < fields.length; i += 2) { - document.field((String) fields[i], fields[i + 1]); - } - document.endObject(); - final Request request = new Request("POST", "/" + index + "/_doc/" + id); - request.setJsonEntity(Strings.toString(document)); - assertOK(client.performRequest(request)); - } - - private static void refresh(String index) throws IOException { - assertOK(client().performRequest(new Request("POST", "/" + index + "/_refresh"))); - } - - private static void followIndex(String leaderCluster, String leaderIndex, String followIndex) throws IOException { - final Request request = new Request("PUT", "/" + followIndex + "/_ccr/follow"); - request.setJsonEntity( - "{\"leader_cluster\": \"" + leaderCluster + "\", \"leader_index\": \"" + leaderIndex + "\", \"poll_timeout\": \"10ms\"}"); - assertOK(client().performRequest(request)); - } - - private static void verifyDocuments(String index, int expectedNumDocs) throws IOException { - verifyDocuments(index, expectedNumDocs, client()); - } - - private static void verifyDocuments(final String index, final int expectedNumDocs, final RestClient client) throws IOException { - final Request request = new Request("GET", "/" + index + "/_search"); - request.addParameter("size", Integer.toString(expectedNumDocs)); - request.addParameter("sort", "field:asc"); - request.addParameter("q", "filtered_field:true"); - Map response = toMap(client.performRequest(request)); - - int numDocs = (int) XContentMapValues.extractValue("hits.total", response); - assertThat(numDocs, equalTo(expectedNumDocs)); - - List hits = (List) XContentMapValues.extractValue("hits.hits", response); - assertThat(hits.size(), equalTo(expectedNumDocs)); - for (int i = 0; i < expectedNumDocs; i++) { - int value = (int) XContentMapValues.extractValue("_source.field", (Map) hits.get(i)); - assertThat(i, equalTo(value)); - } - } - - private static Map toMap(Response 
response) throws IOException { - return toMap(EntityUtils.toString(response.getEntity())); - } - - private static Map toMap(String response) { - return XContentHelper.convertToMap(JsonXContent.jsonXContent, response, false); - } - - private RestClient buildLeaderClient() throws IOException { - assert "leader".equals(targetCluster) == false; - return buildClient(System.getProperty("tests.leader_host")); - } - - private RestClient buildMiddleClient() throws IOException { - assert "middle".equals(targetCluster) == false; - return buildClient(System.getProperty("tests.middle_host")); - } - - private RestClient buildClient(final String url) throws IOException { - int portSeparator = url.lastIndexOf(':'); - HttpHost httpHost = new HttpHost(url.substring(0, portSeparator), - Integer.parseInt(url.substring(portSeparator + 1)), getProtocol()); - return buildClient(Settings.EMPTY, new HttpHost[]{httpHost}); - } - } diff --git a/x-pack/plugin/ccr/qa/multi-cluster-with-non-compliant-license/build.gradle b/x-pack/plugin/ccr/qa/multi-cluster-with-non-compliant-license/build.gradle index 845c9df533dba..7f1dd2c3211a2 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster-with-non-compliant-license/build.gradle +++ b/x-pack/plugin/ccr/qa/multi-cluster-with-non-compliant-license/build.gradle @@ -5,6 +5,7 @@ apply plugin: 'elasticsearch.standalone-test' dependencies { testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') testCompile project(path: xpackModule('ccr'), configuration: 'runtime') + testCompile project(':x-pack:plugin:ccr:qa:') } task leaderClusterTest(type: RestIntegTestTask) { @@ -17,7 +18,7 @@ leaderClusterTestCluster { } leaderClusterTestRunner { - systemProperty 'tests.is_leader_cluster', 'true' + systemProperty 'tests.target_cluster', 'leader' } task writeJavaPolicy { @@ -49,7 +50,7 @@ followClusterTestCluster { followClusterTestRunner { systemProperty 'java.security.policy', "file://${buildDir}/tmp/java.policy" - systemProperty 
'tests.is_leader_cluster', 'false' + systemProperty 'tests.target_cluster', 'follow' systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" systemProperty 'log', "${-> followClusterTest.getNodes().get(0).homeDir}/logs/${-> followClusterTest.getNodes().get(0).clusterName}.log" finalizedBy 'leaderClusterTestCluster#stop' diff --git a/x-pack/plugin/ccr/qa/multi-cluster-with-non-compliant-license/src/test/java/org/elasticsearch/xpack/ccr/CcrMultiClusterLicenseIT.java b/x-pack/plugin/ccr/qa/multi-cluster-with-non-compliant-license/src/test/java/org/elasticsearch/xpack/ccr/CcrMultiClusterLicenseIT.java index bc3d846343a02..074701c7313c8 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster-with-non-compliant-license/src/test/java/org/elasticsearch/xpack/ccr/CcrMultiClusterLicenseIT.java +++ b/x-pack/plugin/ccr/qa/multi-cluster-with-non-compliant-license/src/test/java/org/elasticsearch/xpack/ccr/CcrMultiClusterLicenseIT.java @@ -9,9 +9,7 @@ import org.apache.lucene.util.Constants; import org.elasticsearch.client.Request; import org.elasticsearch.client.ResponseException; -import org.elasticsearch.common.Booleans; import org.elasticsearch.common.io.PathUtils; -import org.elasticsearch.test.rest.ESRestTestCase; import java.nio.file.Files; import java.util.Iterator; @@ -22,36 +20,21 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasToString; -public class CcrMultiClusterLicenseIT extends ESRestTestCase { - - private final boolean runningAgainstLeaderCluster = Booleans.parseBoolean(System.getProperty("tests.is_leader_cluster")); - - @Override - protected boolean preserveClusterUponCompletion() { - return true; - } - - public void testResumeFollow() { - if (runningAgainstLeaderCluster == false) { - final Request request = new Request("POST", "/follower/_ccr/resume_follow"); - request.setJsonEntity("{\"leader_cluster\": \"leader_cluster\", \"leader_index\": \"leader\"}"); - assertNonCompliantLicense(request); - } - } 
+public class CcrMultiClusterLicenseIT extends ESCCRRestTestCase { public void testFollow() { - if (runningAgainstLeaderCluster == false) { + if ("follow".equals(targetCluster)) { final Request request = new Request("PUT", "/follower/_ccr/follow"); - request.setJsonEntity("{\"leader_cluster\": \"leader_cluster\", \"leader_index\": \"leader\"}"); + request.setJsonEntity("{\"remote_cluster\": \"leader_cluster\", \"leader_index\": \"leader\"}"); assertNonCompliantLicense(request); } } public void testAutoFollow() throws Exception { assumeFalse("windows is the worst", Constants.WINDOWS); - if (runningAgainstLeaderCluster == false) { + if ("follow".equals(targetCluster)) { final Request request = new Request("PUT", "/_ccr/auto_follow/test_pattern"); - request.setJsonEntity("{\"leader_index_patterns\":[\"*\"], \"leader_cluster\": \"leader_cluster\"}"); + request.setJsonEntity("{\"leader_index_patterns\":[\"*\"], \"remote_cluster\": \"leader_cluster\"}"); client().performRequest(request); // parse the logs and ensure that the auto-coordinator skipped coordination on the leader cluster diff --git a/x-pack/plugin/ccr/qa/multi-cluster-with-security/build.gradle b/x-pack/plugin/ccr/qa/multi-cluster-with-security/build.gradle index 418c4e6d249e9..f005a71b1653a 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster-with-security/build.gradle +++ b/x-pack/plugin/ccr/qa/multi-cluster-with-security/build.gradle @@ -5,6 +5,7 @@ apply plugin: 'elasticsearch.standalone-test' dependencies { testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') testCompile project(path: xpackModule('ccr'), configuration: 'runtime') + testCompile project(':x-pack:plugin:ccr:qa') } task leaderClusterTest(type: RestIntegTestTask) { @@ -35,7 +36,7 @@ leaderClusterTestCluster { } leaderClusterTestRunner { - systemProperty 'tests.is_leader_cluster', 'true' + systemProperty 'tests.target_cluster', 'leader' } task followClusterTest(type: RestIntegTestTask) {} @@ -66,7 +67,7 @@ 
followClusterTestCluster { } followClusterTestRunner { - systemProperty 'tests.is_leader_cluster', 'false' + systemProperty 'tests.target_cluster', 'follow' systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" finalizedBy 'leaderClusterTestCluster#stop' } diff --git a/x-pack/plugin/ccr/qa/multi-cluster-with-security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java b/x-pack/plugin/ccr/qa/multi-cluster-with-security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java index 6d5ca4559fed9..a8d9441f67d47 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster-with-security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java +++ b/x-pack/plugin/ccr/qa/multi-cluster-with-security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java @@ -5,39 +5,24 @@ */ package org.elasticsearch.xpack.ccr; -import org.apache.http.HttpHost; -import org.apache.http.util.EntityUtils; import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; -import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.test.rest.ESRestTestCase; -import java.io.IOException; import java.util.List; import java.util.Map; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; 
import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; -public class FollowIndexSecurityIT extends ESRestTestCase { - - private final boolean runningAgainstLeaderCluster = Booleans.parseBoolean(System.getProperty("tests.is_leader_cluster")); +public class FollowIndexSecurityIT extends ESCCRRestTestCase { @Override protected Settings restClientSettings() { @@ -55,16 +40,11 @@ protected Settings restAdminSettings() { .build(); } - @Override - protected boolean preserveClusterUponCompletion() { - return true; - } - public void testFollowIndex() throws Exception { final int numDocs = 16; final String allowedIndex = "allowed-index"; final String unallowedIndex = "unallowed-index"; - if (runningAgainstLeaderCluster) { + if ("leader".equals(targetCluster)) { logger.info("Running against leader cluster"); Settings indexSettings = Settings.builder().put("index.soft_deletes.enabled", true).build(); createIndex(allowedIndex, indexSettings); @@ -78,10 +58,10 @@ public void testFollowIndex() throws Exception { index(unallowedIndex, Integer.toString(i), "field", i); } refresh(allowedIndex); - verifyDocuments(adminClient(), allowedIndex, numDocs); + verifyDocuments(allowedIndex, numDocs, "*:*"); } else { - follow(allowedIndex, allowedIndex); - assertBusy(() -> verifyDocuments(client(), allowedIndex, numDocs)); + followIndex(client(), "leader_cluster", allowedIndex, allowedIndex); + assertBusy(() -> verifyDocuments(allowedIndex, numDocs, "*:*")); assertThat(countCcrNodeTasks(), equalTo(1)); assertBusy(() -> verifyCcrMonitoring(allowedIndex, allowedIndex)); assertOK(client().performRequest(new Request("POST", "/" + allowedIndex + "/_ccr/pause_follow"))); @@ -93,7 +73,7 @@ public void testFollowIndex() throws Exception { assertThat(countCcrNodeTasks(), equalTo(0)); }); - resumeFollow(allowedIndex, 
allowedIndex); + resumeFollow(allowedIndex); assertThat(countCcrNodeTasks(), equalTo(1)); assertOK(client().performRequest(new Request("POST", "/" + allowedIndex + "/_ccr/pause_follow"))); // Make sure that there are no other ccr relates operations running: @@ -106,54 +86,59 @@ public void testFollowIndex() throws Exception { assertOK(client().performRequest(new Request("POST", "/" + allowedIndex + "/_close"))); assertOK(client().performRequest(new Request("POST", "/" + allowedIndex + "/_ccr/unfollow"))); - Exception e = expectThrows(ResponseException.class, () -> resumeFollow(allowedIndex, allowedIndex)); + Exception e = expectThrows(ResponseException.class, () -> resumeFollow(allowedIndex)); assertThat(e.getMessage(), containsString("follow index [" + allowedIndex + "] does not have ccr metadata")); // User does not have manage_follow_index index privilege for 'unallowedIndex': - e = expectThrows(ResponseException.class, () -> follow(unallowedIndex, unallowedIndex)); + e = expectThrows(ResponseException.class, () -> followIndex(client(), "leader_cluster", unallowedIndex, unallowedIndex)); assertThat(e.getMessage(), containsString("action [indices:admin/xpack/ccr/put_follow] is unauthorized for user [test_ccr]")); // Verify that the follow index has not been created and no node tasks are running - assertThat(indexExists(adminClient(), unallowedIndex), is(false)); + assertThat(indexExists(unallowedIndex), is(false)); assertBusy(() -> assertThat(countCcrNodeTasks(), equalTo(0))); // User does have manage_follow_index index privilege on 'allowed' index, // but not read / monitor roles on 'disallowed' index: - e = expectThrows(ResponseException.class, () -> follow(unallowedIndex, allowedIndex)); + e = expectThrows(ResponseException.class, () -> followIndex(client(), "leader_cluster", unallowedIndex, allowedIndex)); assertThat(e.getMessage(), containsString("insufficient privileges to follow index [unallowed-index], " + "privilege for action [indices:monitor/stats] is 
missing, " + "privilege for action [indices:data/read/xpack/ccr/shard_changes] is missing")); // Verify that the follow index has not been created and no node tasks are running - assertThat(indexExists(adminClient(), unallowedIndex), is(false)); + assertThat(indexExists(unallowedIndex), is(false)); assertBusy(() -> assertThat(countCcrNodeTasks(), equalTo(0))); - e = expectThrows(ResponseException.class, () -> resumeFollow(unallowedIndex, unallowedIndex)); + followIndex(adminClient(), "leader_cluster", unallowedIndex, unallowedIndex); + pauseFollow(adminClient(), unallowedIndex); + + e = expectThrows(ResponseException.class, () -> resumeFollow(unallowedIndex)); assertThat(e.getMessage(), containsString("insufficient privileges to follow index [unallowed-index], " + "privilege for action [indices:monitor/stats] is missing, " + "privilege for action [indices:data/read/xpack/ccr/shard_changes] is missing")); - assertThat(indexExists(adminClient(), unallowedIndex), is(false)); assertBusy(() -> assertThat(countCcrNodeTasks(), equalTo(0))); e = expectThrows(ResponseException.class, () -> client().performRequest(new Request("POST", "/" + unallowedIndex + "/_ccr/unfollow"))); assertThat(e.getMessage(), containsString("action [indices:admin/xpack/ccr/unfollow] is unauthorized for user [test_ccr]")); + assertOK(adminClient().performRequest(new Request("POST", "/" + unallowedIndex + "/_close"))); + assertOK(adminClient().performRequest(new Request("POST", "/" + unallowedIndex + "/_ccr/unfollow"))); + assertBusy(() -> assertThat(countCcrNodeTasks(), equalTo(0))); } } public void testAutoFollowPatterns() throws Exception { - assumeFalse("Test should only run when both clusters are running", runningAgainstLeaderCluster); + assumeFalse("Test should only run when both clusters are running", "leader".equals(targetCluster)); String allowedIndex = "logs-eu-20190101"; String disallowedIndex = "logs-us-20190101"; { Request request = new Request("PUT", "/_ccr/auto_follow/test_pattern"); 
- request.setJsonEntity("{\"leader_index_patterns\": [\"logs-*\"], \"leader_cluster\": \"leader_cluster\"}"); + request.setJsonEntity("{\"leader_index_patterns\": [\"logs-*\"], \"remote_cluster\": \"leader_cluster\"}"); Exception e = expectThrows(ResponseException.class, () -> assertOK(client().performRequest(request))); assertThat(e.getMessage(), containsString("insufficient privileges to follow index [logs-*]")); } Request request = new Request("PUT", "/_ccr/auto_follow/test_pattern"); - request.setJsonEntity("{\"leader_index_patterns\": [\"logs-eu-*\"], \"leader_cluster\": \"leader_cluster\"}"); + request.setJsonEntity("{\"leader_index_patterns\": [\"logs-eu-*\"], \"remote_cluster\": \"leader_cluster\"}"); assertOK(client().performRequest(request)); try (RestClient leaderClient = buildLeaderClient()) { @@ -176,9 +161,9 @@ public void testAutoFollowPatterns() throws Exception { assertBusy(() -> { ensureYellow(allowedIndex); - verifyDocuments(adminClient(), allowedIndex, 5); + verifyDocuments(allowedIndex, 5, "*:*"); }); - assertThat(indexExists(adminClient(), disallowedIndex), is(false)); + assertThat(indexExists(disallowedIndex), is(false)); assertBusy(() -> { verifyCcrMonitoring(allowedIndex, allowedIndex); verifyAutoFollowMonitoring(); @@ -187,185 +172,7 @@ public void testAutoFollowPatterns() throws Exception { // Cleanup by deleting auto follow pattern and pause following: request = new Request("DELETE", "/_ccr/auto_follow/test_pattern"); assertOK(client().performRequest(request)); - pauseFollow(allowedIndex); - } - - private int countCcrNodeTasks() throws IOException { - final Request request = new Request("GET", "/_tasks"); - request.addParameter("detailed", "true"); - Map rsp1 = toMap(adminClient().performRequest(request)); - Map nodes = (Map) rsp1.get("nodes"); - assertThat(nodes.size(), equalTo(1)); - Map node = (Map) nodes.values().iterator().next(); - Map nodeTasks = (Map) node.get("tasks"); - int numNodeTasks = 0; - for (Map.Entry entry : 
nodeTasks.entrySet()) { - Map nodeTask = (Map) entry.getValue(); - String action = (String) nodeTask.get("action"); - if (action.startsWith("xpack/ccr/shard_follow_task")) { - numNodeTasks++; - } - } - return numNodeTasks; - } - - private static void index(String index, String id, Object... fields) throws IOException { - index(adminClient(), index, id, fields); - } - - private static void index(RestClient client, String index, String id, Object... fields) throws IOException { - XContentBuilder document = jsonBuilder().startObject(); - for (int i = 0; i < fields.length; i += 2) { - document.field((String) fields[i], fields[i + 1]); - } - document.endObject(); - final Request request = new Request("POST", "/" + index + "/_doc/" + id); - request.setJsonEntity(Strings.toString(document)); - assertOK(client.performRequest(request)); - } - - private static void refresh(String index) throws IOException { - assertOK(adminClient().performRequest(new Request("POST", "/" + index + "/_refresh"))); - } - - private static void resumeFollow(String leaderIndex, String followIndex) throws IOException { - final Request request = new Request("POST", "/" + followIndex + "/_ccr/resume_follow"); - request.setJsonEntity("{\"leader_cluster\": \"leader_cluster\", \"leader_index\": \"" + leaderIndex + - "\", \"poll_timeout\": \"10ms\"}"); - assertOK(client().performRequest(request)); - } - - private static void follow(String leaderIndex, String followIndex) throws IOException { - final Request request = new Request("PUT", "/" + followIndex + "/_ccr/follow"); - request.setJsonEntity("{\"leader_cluster\": \"leader_cluster\", \"leader_index\": \"" + leaderIndex + - "\", \"poll_timeout\": \"10ms\"}"); - assertOK(client().performRequest(request)); - } - - void verifyDocuments(RestClient client, String index, int expectedNumDocs) throws IOException { - final Request request = new Request("GET", "/" + index + "/_search"); - request.addParameter("pretty", "true"); - request.addParameter("size", 
Integer.toString(expectedNumDocs)); - request.addParameter("sort", "field:asc"); - Map response = toMap(client.performRequest(request)); - - int numDocs = (int) XContentMapValues.extractValue("hits.total", response); - assertThat(numDocs, equalTo(expectedNumDocs)); - - List hits = (List) XContentMapValues.extractValue("hits.hits", response); - assertThat(hits.size(), equalTo(expectedNumDocs)); - for (int i = 0; i < expectedNumDocs; i++) { - int value = (int) XContentMapValues.extractValue("_source.field", (Map) hits.get(i)); - assertThat(i, equalTo(value)); - } - } - - private static Map toMap(Response response) throws IOException { - return toMap(EntityUtils.toString(response.getEntity())); - } - - private static Map toMap(String response) { - return XContentHelper.convertToMap(JsonXContent.jsonXContent, response, false); - } - - protected static void createIndex(String name, Settings settings) throws IOException { - createIndex(name, settings, ""); - } - - protected static void createIndex(String name, Settings settings, String mapping) throws IOException { - final Request request = new Request("PUT", "/" + name); - request.setJsonEntity("{ \"settings\": " + Strings.toString(settings) + ", \"mappings\" : {" + mapping + "} }"); - assertOK(adminClient().performRequest(request)); - } - - private static void ensureYellow(String index) throws IOException { - Request request = new Request("GET", "/_cluster/health/" + index); - request.addParameter("wait_for_status", "yellow"); - request.addParameter("wait_for_no_relocating_shards", "true"); - request.addParameter("wait_for_no_initializing_shards", "true"); - request.addParameter("timeout", "70s"); - request.addParameter("level", "shards"); - adminClient().performRequest(request); - } - - private RestClient buildLeaderClient() throws IOException { - assert runningAgainstLeaderCluster == false; - String leaderUrl = System.getProperty("tests.leader_host"); - int portSeparator = leaderUrl.lastIndexOf(':'); - HttpHost 
httpHost = new HttpHost(leaderUrl.substring(0, portSeparator), - Integer.parseInt(leaderUrl.substring(portSeparator + 1)), getProtocol()); - return buildClient(restAdminSettings(), new HttpHost[]{httpHost}); - } - - private static boolean indexExists(RestClient client, String index) throws IOException { - Response response = client.performRequest(new Request("HEAD", "/" + index)); - return RestStatus.OK.getStatus() == response.getStatusLine().getStatusCode(); - } - - private static void pauseFollow(String followIndex) throws IOException { - assertOK(client().performRequest(new Request("POST", "/" + followIndex + "/_ccr/pause_follow"))); - } - - private static void verifyCcrMonitoring(String expectedLeaderIndex, String expectedFollowerIndex) throws IOException { - Request request = new Request("GET", "/.monitoring-*/_search"); - request.setJsonEntity("{\"query\": {\"term\": {\"ccr_stats.leader_index\": \"" + expectedLeaderIndex + "\"}}}"); - Map response; - try { - response = toMap(adminClient().performRequest(request)); - } catch (ResponseException e) { - throw new AssertionError("error while searching", e); - } - - int numberOfOperationsReceived = 0; - int numberOfOperationsIndexed = 0; - - List hits = (List) XContentMapValues.extractValue("hits.hits", response); - assertThat(hits.size(), greaterThanOrEqualTo(1)); - - for (int i = 0; i < hits.size(); i++) { - Map hit = (Map) hits.get(i); - String leaderIndex = (String) XContentMapValues.extractValue("_source.ccr_stats.leader_index", hit); - assertThat(leaderIndex, endsWith(expectedLeaderIndex)); - - final String followerIndex = (String) XContentMapValues.extractValue("_source.ccr_stats.follower_index", hit); - assertThat(followerIndex, equalTo(expectedFollowerIndex)); - - int foundNumberOfOperationsReceived = - (int) XContentMapValues.extractValue("_source.ccr_stats.operations_received", hit); - numberOfOperationsReceived = Math.max(numberOfOperationsReceived, foundNumberOfOperationsReceived); - int 
foundNumberOfOperationsIndexed = - (int) XContentMapValues.extractValue("_source.ccr_stats.number_of_operations_indexed", hit); - numberOfOperationsIndexed = Math.max(numberOfOperationsIndexed, foundNumberOfOperationsIndexed); - } - - assertThat(numberOfOperationsReceived, greaterThanOrEqualTo(1)); - assertThat(numberOfOperationsIndexed, greaterThanOrEqualTo(1)); - } - - private static void verifyAutoFollowMonitoring() throws IOException { - Request request = new Request("GET", "/.monitoring-*/_search"); - request.setJsonEntity("{\"query\": {\"term\": {\"type\": \"ccr_auto_follow_stats\"}}}"); - Map response; - try { - response = toMap(adminClient().performRequest(request)); - } catch (ResponseException e) { - throw new AssertionError("error while searching", e); - } - - int numberOfSuccessfulFollowIndices = 0; - - List hits = (List) XContentMapValues.extractValue("hits.hits", response); - assertThat(hits.size(), greaterThanOrEqualTo(1)); - - for (int i = 0; i < hits.size(); i++) { - Map hit = (Map) hits.get(i); - - int foundNumberOfOperationsReceived = - (int) XContentMapValues.extractValue("_source.ccr_auto_follow_stats.number_of_successful_follow_indices", hit); - numberOfSuccessfulFollowIndices = Math.max(numberOfSuccessfulFollowIndices, foundNumberOfOperationsReceived); - } - - assertThat(numberOfSuccessfulFollowIndices, greaterThanOrEqualTo(1)); + pauseFollow(client(), allowedIndex); } } diff --git a/x-pack/plugin/ccr/qa/multi-cluster/build.gradle b/x-pack/plugin/ccr/qa/multi-cluster/build.gradle index b3b6372384888..3e3661aae1a23 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster/build.gradle +++ b/x-pack/plugin/ccr/qa/multi-cluster/build.gradle @@ -5,6 +5,7 @@ apply plugin: 'elasticsearch.standalone-test' dependencies { testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') testCompile project(path: xpackModule('ccr'), configuration: 'runtime') + testCompile project(':x-pack:plugin:ccr:qa') } task leaderClusterTest(type: 
RestIntegTestTask) { @@ -18,7 +19,7 @@ leaderClusterTestCluster { } leaderClusterTestRunner { - systemProperty 'tests.is_leader_cluster', 'true' + systemProperty 'tests.target_cluster', 'leader' } task followClusterTest(type: RestIntegTestTask) {} @@ -33,7 +34,7 @@ followClusterTestCluster { } followClusterTestRunner { - systemProperty 'tests.is_leader_cluster', 'false' + systemProperty 'tests.target_cluster', 'follow' systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" finalizedBy 'leaderClusterTestCluster#stop' } diff --git a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java index 8e50b3697f6cd..ed3a03f0b17f5 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java +++ b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java @@ -5,44 +5,23 @@ */ package org.elasticsearch.xpack.ccr; -import org.apache.http.HttpHost; -import org.apache.http.util.EntityUtils; import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; -import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.test.rest.ESRestTestCase; -import java.io.IOException; -import java.util.List; import java.util.Map; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; -import static 
org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -public class FollowIndexIT extends ESRestTestCase { - - private final boolean runningAgainstLeaderCluster = Booleans.parseBoolean(System.getProperty("tests.is_leader_cluster")); - - @Override - protected boolean preserveClusterUponCompletion() { - return true; - } +public class FollowIndexIT extends ESCCRRestTestCase { public void testFollowIndex() throws Exception { final int numDocs = 128; final String leaderIndexName = "test_index1"; - if (runningAgainstLeaderCluster) { + if ("leader".equals(targetCluster)) { logger.info("Running against leader cluster"); String mapping = ""; if (randomBoolean()) { // randomly do source filtering on indexing @@ -63,48 +42,48 @@ public void testFollowIndex() throws Exception { index(client(), leaderIndexName, Integer.toString(i), "field", i, "filtered_field", "true"); } refresh(leaderIndexName); - verifyDocuments(leaderIndexName, numDocs); + verifyDocuments(leaderIndexName, numDocs, "filtered_field:true"); } else { logger.info("Running against follow cluster"); final String followIndexName = "test_index2"; followIndex(leaderIndexName, followIndexName); - assertBusy(() -> verifyDocuments(followIndexName, numDocs)); + assertBusy(() -> verifyDocuments(followIndexName, numDocs, "filtered_field:true")); // unfollow and then follow and then index a few docs in leader index: pauseFollow(followIndexName); - resumeFollow(leaderIndexName, followIndexName); + resumeFollow(followIndexName); try (RestClient leaderClient = buildLeaderClient()) { int id = numDocs; index(leaderClient, leaderIndexName, Integer.toString(id), "field", id, "filtered_field", "true"); index(leaderClient, leaderIndexName, Integer.toString(id + 1), "field", id + 1, "filtered_field", "true"); index(leaderClient, leaderIndexName, Integer.toString(id + 2), "field", id + 2, "filtered_field", "true"); } - assertBusy(() -> 
verifyDocuments(followIndexName, numDocs + 3)); + assertBusy(() -> verifyDocuments(followIndexName, numDocs + 3, "filtered_field:true")); assertBusy(() -> verifyCcrMonitoring(leaderIndexName, followIndexName)); pauseFollow(followIndexName); assertOK(client().performRequest(new Request("POST", "/" + followIndexName + "/_close"))); assertOK(client().performRequest(new Request("POST", "/" + followIndexName + "/_ccr/unfollow"))); - Exception e = expectThrows(ResponseException.class, () -> resumeFollow(leaderIndexName, followIndexName)); + Exception e = expectThrows(ResponseException.class, () -> resumeFollow(followIndexName)); assertThat(e.getMessage(), containsString("follow index [" + followIndexName + "] does not have ccr metadata")); } } public void testFollowNonExistingLeaderIndex() throws Exception { - assumeFalse("Test should only run when both clusters are running", runningAgainstLeaderCluster); - ResponseException e = expectThrows(ResponseException.class, () -> resumeFollow("non-existing-index", "non-existing-index")); - assertThat(e.getMessage(), containsString("no such index")); + assumeFalse("Test should only run when both clusters are running", "leader".equals(targetCluster)); + ResponseException e = expectThrows(ResponseException.class, () -> resumeFollow("non-existing-index")); + assertThat(e.getMessage(), containsString("no such index [non-existing-index]")); assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(404)); e = expectThrows(ResponseException.class, () -> followIndex("non-existing-index", "non-existing-index")); - assertThat(e.getMessage(), containsString("no such index")); + assertThat(e.getMessage(), containsString("no such index [non-existing-index]")); assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(404)); } public void testAutoFollowPatterns() throws Exception { - assumeFalse("Test should only run when both clusters are running", runningAgainstLeaderCluster); + assumeFalse("Test should only run when both 
clusters are running", "leader".equals(targetCluster)); Request request = new Request("PUT", "/_ccr/auto_follow/test_pattern"); - request.setJsonEntity("{\"leader_index_patterns\": [\"logs-*\"], \"leader_cluster\": \"leader_cluster\"}"); + request.setJsonEntity("{\"leader_index_patterns\": [\"logs-*\"], \"remote_cluster\": \"leader_cluster\"}"); assertOK(client().performRequest(request)); try (RestClient leaderClient = buildLeaderClient()) { @@ -128,7 +107,7 @@ public void testAutoFollowPatterns() throws Exception { assertThat(response.get("number_of_successful_follow_indices"), equalTo(1)); ensureYellow("logs-20190101"); - verifyDocuments("logs-20190101", 5); + verifyDocuments("logs-20190101", 5, "filtered_field:true"); }); assertBusy(() -> { verifyCcrMonitoring("logs-20190101", "logs-20190101"); @@ -136,144 +115,4 @@ public void testAutoFollowPatterns() throws Exception { }); } - private static void index(RestClient client, String index, String id, Object... fields) throws IOException { - XContentBuilder document = jsonBuilder().startObject(); - for (int i = 0; i < fields.length; i += 2) { - document.field((String) fields[i], fields[i + 1]); - } - document.endObject(); - final Request request = new Request("POST", "/" + index + "/_doc/" + id); - request.setJsonEntity(Strings.toString(document)); - assertOK(client.performRequest(request)); - } - - private static void refresh(String index) throws IOException { - assertOK(client().performRequest(new Request("POST", "/" + index + "/_refresh"))); - } - - private static void resumeFollow(String leaderIndex, String followIndex) throws IOException { - final Request request = new Request("POST", "/" + followIndex + "/_ccr/resume_follow"); - request.setJsonEntity("{\"leader_cluster\": \"leader_cluster\", \"leader_index\": \"" + leaderIndex + - "\", \"poll_timeout\": \"10ms\"}"); - assertOK(client().performRequest(request)); - } - - private static void followIndex(String leaderIndex, String followIndex) throws IOException { 
- final Request request = new Request("PUT", "/" + followIndex + "/_ccr/follow"); - request.setJsonEntity("{\"leader_cluster\": \"leader_cluster\", \"leader_index\": \"" + leaderIndex + - "\", \"poll_timeout\": \"10ms\"}"); - assertOK(client().performRequest(request)); - } - - private static void pauseFollow(String followIndex) throws IOException { - assertOK(client().performRequest(new Request("POST", "/" + followIndex + "/_ccr/pause_follow"))); - } - - private static void verifyDocuments(String index, int expectedNumDocs) throws IOException { - final Request request = new Request("GET", "/" + index + "/_search"); - request.addParameter("size", Integer.toString(expectedNumDocs)); - request.addParameter("sort", "field:asc"); - request.addParameter("q", "filtered_field:true"); - Map response = toMap(client().performRequest(request)); - - int numDocs = (int) XContentMapValues.extractValue("hits.total", response); - assertThat(numDocs, equalTo(expectedNumDocs)); - - List hits = (List) XContentMapValues.extractValue("hits.hits", response); - assertThat(hits.size(), equalTo(expectedNumDocs)); - for (int i = 0; i < expectedNumDocs; i++) { - int value = (int) XContentMapValues.extractValue("_source.field", (Map) hits.get(i)); - assertThat(i, equalTo(value)); - } - } - - private static void verifyCcrMonitoring(final String expectedLeaderIndex, final String expectedFollowerIndex) throws IOException { - Request request = new Request("GET", "/.monitoring-*/_search"); - request.setJsonEntity("{\"query\": {\"term\": {\"ccr_stats.leader_index\": \"" + expectedLeaderIndex + "\"}}}"); - Map response; - try { - response = toMap(client().performRequest(request)); - } catch (ResponseException e) { - throw new AssertionError("error while searching", e); - } - - int numberOfOperationsReceived = 0; - int numberOfOperationsIndexed = 0; - - List hits = (List) XContentMapValues.extractValue("hits.hits", response); - assertThat(hits.size(), greaterThanOrEqualTo(1)); - - for (int i = 0; i < 
hits.size(); i++) { - Map hit = (Map) hits.get(i); - String leaderIndex = (String) XContentMapValues.extractValue("_source.ccr_stats.leader_index", hit); - assertThat(leaderIndex, endsWith(expectedLeaderIndex)); - - final String followerIndex = (String) XContentMapValues.extractValue("_source.ccr_stats.follower_index", hit); - assertThat(followerIndex, equalTo(expectedFollowerIndex)); - - int foundNumberOfOperationsReceived = - (int) XContentMapValues.extractValue("_source.ccr_stats.operations_received", hit); - numberOfOperationsReceived = Math.max(numberOfOperationsReceived, foundNumberOfOperationsReceived); - int foundNumberOfOperationsIndexed = - (int) XContentMapValues.extractValue("_source.ccr_stats.number_of_operations_indexed", hit); - numberOfOperationsIndexed = Math.max(numberOfOperationsIndexed, foundNumberOfOperationsIndexed); - } - - assertThat(numberOfOperationsReceived, greaterThanOrEqualTo(1)); - assertThat(numberOfOperationsIndexed, greaterThanOrEqualTo(1)); - } - - private static void verifyAutoFollowMonitoring() throws IOException { - Request request = new Request("GET", "/.monitoring-*/_search"); - request.setJsonEntity("{\"query\": {\"term\": {\"type\": \"ccr_auto_follow_stats\"}}}"); - Map response; - try { - response = toMap(client().performRequest(request)); - } catch (ResponseException e) { - throw new AssertionError("error while searching", e); - } - - int numberOfSuccessfulFollowIndices = 0; - - List hits = (List) XContentMapValues.extractValue("hits.hits", response); - assertThat(hits.size(), greaterThanOrEqualTo(1)); - - for (int i = 0; i < hits.size(); i++) { - Map hit = (Map) hits.get(i); - - int foundNumberOfOperationsReceived = - (int) XContentMapValues.extractValue("_source.ccr_auto_follow_stats.number_of_successful_follow_indices", hit); - numberOfSuccessfulFollowIndices = Math.max(numberOfSuccessfulFollowIndices, foundNumberOfOperationsReceived); - } - - assertThat(numberOfSuccessfulFollowIndices, greaterThanOrEqualTo(1)); - } - 
- private static Map toMap(Response response) throws IOException { - return toMap(EntityUtils.toString(response.getEntity())); - } - - private static Map toMap(String response) { - return XContentHelper.convertToMap(JsonXContent.jsonXContent, response, false); - } - - private static void ensureYellow(String index) throws IOException { - Request request = new Request("GET", "/_cluster/health/" + index); - request.addParameter("wait_for_status", "yellow"); - request.addParameter("wait_for_no_relocating_shards", "true"); - request.addParameter("wait_for_no_initializing_shards", "true"); - request.addParameter("timeout", "70s"); - request.addParameter("level", "shards"); - client().performRequest(request); - } - - private RestClient buildLeaderClient() throws IOException { - assert runningAgainstLeaderCluster == false; - String leaderUrl = System.getProperty("tests.leader_host"); - int portSeparator = leaderUrl.lastIndexOf(':'); - HttpHost httpHost = new HttpHost(leaderUrl.substring(0, portSeparator), - Integer.parseInt(leaderUrl.substring(portSeparator + 1)), getProtocol()); - return buildClient(Settings.EMPTY, new HttpHost[]{httpHost}); - } - } diff --git a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/auto_follow.yml b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/auto_follow.yml index 357fc7e1f5678..4d4026f46a472 100644 --- a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/auto_follow.yml +++ b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/auto_follow.yml @@ -23,23 +23,23 @@ ccr.put_auto_follow_pattern: name: my_pattern body: - leader_cluster: local + remote_cluster: local leader_index_patterns: ['logs-*'] - max_concurrent_read_batches: 2 + max_outstanding_read_requests: 2 - is_true: acknowledged - do: ccr.get_auto_follow_pattern: name: my_pattern - - match: { my_pattern.leader_cluster: 'local' } + - match: { my_pattern.remote_cluster: 'local' } - match: { 
my_pattern.leader_index_patterns: ['logs-*'] } - - match: { my_pattern.max_concurrent_read_batches: 2 } + - match: { my_pattern.max_outstanding_read_requests: 2 } - do: ccr.get_auto_follow_pattern: {} - - match: { my_pattern.leader_cluster: 'local' } + - match: { my_pattern.remote_cluster: 'local' } - match: { my_pattern.leader_index_patterns: ['logs-*'] } - - match: { my_pattern.max_concurrent_read_batches: 2 } + - match: { my_pattern.max_outstanding_read_requests: 2 } - do: ccr.delete_auto_follow_pattern: diff --git a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_and_unfollow.yml b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_and_unfollow.yml index 9289be50b21f1..d50bc52bc3620 100644 --- a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_and_unfollow.yml +++ b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_and_unfollow.yml @@ -38,7 +38,7 @@ ccr.follow: index: bar body: - leader_cluster: local + remote_cluster: local leader_index: foo - is_true: follow_index_created - is_true: follow_index_shards_acked @@ -52,9 +52,7 @@ - do: ccr.resume_follow: index: bar - body: - leader_cluster: local - leader_index: foo + body: {} - is_true: acknowledged - do: diff --git a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_stats.yml b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_stats.yml index ba784689dc535..97c538b60bc4e 100644 --- a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_stats.yml +++ b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_stats.yml @@ -37,7 +37,7 @@ ccr.follow: index: bar body: - leader_cluster: local + remote_cluster: local leader_index: foo - is_true: follow_index_created - is_true: follow_index_shards_acked @@ -56,21 +56,21 @@ - gte: { indices.0.shards.0.follower_global_checkpoint: -1 } - gte: { indices.0.shards.0.follower_max_seq_no: 
-1 } - gte: { indices.0.shards.0.last_requested_seq_no: -1 } - - gte: { indices.0.shards.0.number_of_concurrent_reads: 0 } - - match: { indices.0.shards.0.number_of_concurrent_writes: 0 } - - match: { indices.0.shards.0.number_of_queued_writes: 0 } - - gte: { indices.0.shards.0.mapping_version: 0 } - - gte: { indices.0.shards.0.total_fetch_time_millis: 0 } - - gte: { indices.0.shards.0.number_of_successful_fetches: 0 } - - gte: { indices.0.shards.0.number_of_failed_fetches: 0 } - - match: { indices.0.shards.0.operations_received: 0 } - - match: { indices.0.shards.0.total_transferred_bytes: 0 } - - match: { indices.0.shards.0.total_index_time_millis: 0 } - - match: { indices.0.shards.0.number_of_successful_bulk_operations: 0 } - - match: { indices.0.shards.0.number_of_failed_bulk_operations: 0 } - - match: { indices.0.shards.0.number_of_operations_indexed: 0 } - - length: { indices.0.shards.0.fetch_exceptions: 0 } - - gte: { indices.0.shards.0.time_since_last_fetch_millis: -1 } + - gte: { indices.0.shards.0.outstanding_read_requests: 0 } + - match: { indices.0.shards.0.outstanding_write_requests: 0 } + - match: { indices.0.shards.0.write_buffer_operation_count: 0 } + - gte: { indices.0.shards.0.follower_mapping_version: 0 } + - gte: { indices.0.shards.0.total_read_time_millis: 0 } + - gte: { indices.0.shards.0.successful_read_requests: 0 } + - gte: { indices.0.shards.0.failed_read_requests: 0 } + - match: { indices.0.shards.0.operations_read: 0 } + - match: { indices.0.shards.0.bytes_read: 0 } + - match: { indices.0.shards.0.total_write_time_millis: 0 } + - match: { indices.0.shards.0.successful_write_requests: 0 } + - match: { indices.0.shards.0.failed_write_requests: 0 } + - match: { indices.0.shards.0.operations_written: 0 } + - length: { indices.0.shards.0.read_exceptions: 0 } + - gte: { indices.0.shards.0.time_since_last_read_millis: -1 } - do: ccr.pause_follow: diff --git a/x-pack/plugin/ccr/qa/src/main/java/org/elasticsearch/xpack/ccr/ESCCRRestTestCase.java 
b/x-pack/plugin/ccr/qa/src/main/java/org/elasticsearch/xpack/ccr/ESCCRRestTestCase.java new file mode 100644 index 0000000000000..14780702fc4a4 --- /dev/null +++ b/x-pack/plugin/ccr/qa/src/main/java/org/elasticsearch/xpack/ccr/ESCCRRestTestCase.java @@ -0,0 +1,245 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr; + +import org.apache.http.HttpHost; +import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.rest.ESRestTestCase; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.endsWith; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; + +public class ESCCRRestTestCase extends ESRestTestCase { + + protected final String targetCluster = System.getProperty("tests.target_cluster"); + + @Override + protected boolean preserveClusterUponCompletion() { + return true; + } + + protected static void index(String index, String id, Object... 
fields) throws IOException { + index(adminClient(), index, id, fields); + } + + protected static void index(RestClient client, String index, String id, Object... fields) throws IOException { + XContentBuilder document = jsonBuilder().startObject(); + for (int i = 0; i < fields.length; i += 2) { + document.field((String) fields[i], fields[i + 1]); + } + document.endObject(); + final Request request = new Request("POST", "/" + index + "/_doc/" + id); + request.setJsonEntity(Strings.toString(document)); + assertOK(client.performRequest(request)); + } + + protected static void refresh(String index) throws IOException { + assertOK(adminClient().performRequest(new Request("POST", "/" + index + "/_refresh"))); + } + + protected static void resumeFollow(String followIndex) throws IOException { + final Request request = new Request("POST", "/" + followIndex + "/_ccr/resume_follow"); + request.setJsonEntity("{\"read_poll_timeout\": \"10ms\"}"); + assertOK(client().performRequest(request)); + } + + protected static void followIndex(String leaderIndex, String followIndex) throws IOException { + followIndex("leader_cluster", leaderIndex, followIndex); + } + + protected static void followIndex(String leaderCluster, String leaderIndex, String followIndex) throws IOException { + followIndex(client(), leaderCluster, leaderIndex, followIndex); + } + + protected static void followIndex(RestClient client, String leaderCluster, String leaderIndex, String followIndex) throws IOException { + final Request request = new Request("PUT", "/" + followIndex + "/_ccr/follow"); + request.setJsonEntity("{\"remote_cluster\": \"" + leaderCluster + "\", \"leader_index\": \"" + leaderIndex + + "\", \"read_poll_timeout\": \"10ms\"}"); + assertOK(client.performRequest(request)); + } + + protected static void pauseFollow(String followIndex) throws IOException { + pauseFollow(client(), followIndex); + } + + protected static void pauseFollow(RestClient client, String followIndex) throws IOException { + 
assertOK(client.performRequest(new Request("POST", "/" + followIndex + "/_ccr/pause_follow"))); + } + + protected static void verifyDocuments(final String index, final int expectedNumDocs, final String query) throws IOException { + verifyDocuments(index, expectedNumDocs, query, adminClient()); + } + + protected static void verifyDocuments(final String index, + final int expectedNumDocs, + final String query, + final RestClient client) throws IOException { + final Request request = new Request("GET", "/" + index + "/_search"); + request.addParameter("size", Integer.toString(expectedNumDocs)); + request.addParameter("sort", "field:asc"); + request.addParameter("q", query); + Map response = toMap(client.performRequest(request)); + + int numDocs = (int) XContentMapValues.extractValue("hits.total", response); + assertThat(numDocs, equalTo(expectedNumDocs)); + + List hits = (List) XContentMapValues.extractValue("hits.hits", response); + assertThat(hits.size(), equalTo(expectedNumDocs)); + for (int i = 0; i < expectedNumDocs; i++) { + int value = (int) XContentMapValues.extractValue("_source.field", (Map) hits.get(i)); + assertThat(i, equalTo(value)); + } + } + + protected static void verifyCcrMonitoring(final String expectedLeaderIndex, final String expectedFollowerIndex) throws IOException { + Request request = new Request("GET", "/.monitoring-*/_search"); + request.setJsonEntity("{\"query\": {\"term\": {\"ccr_stats.leader_index\": \"" + expectedLeaderIndex + "\"}}}"); + Map response; + try { + response = toMap(adminClient().performRequest(request)); + } catch (ResponseException e) { + throw new AssertionError("error while searching", e); + } + + int numberOfOperationsReceived = 0; + int numberOfOperationsIndexed = 0; + + List hits = (List) XContentMapValues.extractValue("hits.hits", response); + assertThat(hits.size(), greaterThanOrEqualTo(1)); + + for (int i = 0; i < hits.size(); i++) { + Map hit = (Map) hits.get(i); + String leaderIndex = (String) 
XContentMapValues.extractValue("_source.ccr_stats.leader_index", hit); + assertThat(leaderIndex, endsWith(expectedLeaderIndex)); + + final String followerIndex = (String) XContentMapValues.extractValue("_source.ccr_stats.follower_index", hit); + assertThat(followerIndex, equalTo(expectedFollowerIndex)); + + int foundNumberOfOperationsReceived = + (int) XContentMapValues.extractValue("_source.ccr_stats.operations_read", hit); + numberOfOperationsReceived = Math.max(numberOfOperationsReceived, foundNumberOfOperationsReceived); + int foundNumberOfOperationsIndexed = + (int) XContentMapValues.extractValue("_source.ccr_stats.operations_written", hit); + numberOfOperationsIndexed = Math.max(numberOfOperationsIndexed, foundNumberOfOperationsIndexed); + } + + assertThat(numberOfOperationsReceived, greaterThanOrEqualTo(1)); + assertThat(numberOfOperationsIndexed, greaterThanOrEqualTo(1)); + } + + protected static void verifyAutoFollowMonitoring() throws IOException { + Request request = new Request("GET", "/.monitoring-*/_search"); + request.setJsonEntity("{\"query\": {\"term\": {\"type\": \"ccr_auto_follow_stats\"}}}"); + Map response; + try { + response = toMap(adminClient().performRequest(request)); + } catch (ResponseException e) { + throw new AssertionError("error while searching", e); + } + + int numberOfSuccessfulFollowIndices = 0; + + List hits = (List) XContentMapValues.extractValue("hits.hits", response); + assertThat(hits.size(), greaterThanOrEqualTo(1)); + + for (int i = 0; i < hits.size(); i++) { + Map hit = (Map) hits.get(i); + + int foundNumberOfOperationsReceived = + (int) XContentMapValues.extractValue("_source.ccr_auto_follow_stats.number_of_successful_follow_indices", hit); + numberOfSuccessfulFollowIndices = Math.max(numberOfSuccessfulFollowIndices, foundNumberOfOperationsReceived); + } + + assertThat(numberOfSuccessfulFollowIndices, greaterThanOrEqualTo(1)); + } + + protected static Map toMap(Response response) throws IOException { + return 
toMap(EntityUtils.toString(response.getEntity())); + } + + protected static Map toMap(String response) { + return XContentHelper.convertToMap(JsonXContent.jsonXContent, response, false); + } + + protected static void ensureYellow(String index) throws IOException { + Request request = new Request("GET", "/_cluster/health/" + index); + request.addParameter("wait_for_status", "yellow"); + request.addParameter("wait_for_no_relocating_shards", "true"); + request.addParameter("wait_for_no_initializing_shards", "true"); + request.addParameter("timeout", "70s"); + request.addParameter("level", "shards"); + adminClient().performRequest(request); + } + + protected int countCcrNodeTasks() throws IOException { + final Request request = new Request("GET", "/_tasks"); + request.addParameter("detailed", "true"); + Map rsp1 = toMap(adminClient().performRequest(request)); + Map nodes = (Map) rsp1.get("nodes"); + assertThat(nodes.size(), equalTo(1)); + Map node = (Map) nodes.values().iterator().next(); + Map nodeTasks = (Map) node.get("tasks"); + int numNodeTasks = 0; + for (Map.Entry entry : nodeTasks.entrySet()) { + Map nodeTask = (Map) entry.getValue(); + String action = (String) nodeTask.get("action"); + if (action.startsWith("xpack/ccr/shard_follow_task")) { + numNodeTasks++; + } + } + return numNodeTasks; + } + + protected static void createIndex(String name, Settings settings) throws IOException { + createIndex(name, settings, ""); + } + + protected static void createIndex(String name, Settings settings, String mapping) throws IOException { + final Request request = new Request("PUT", "/" + name); + request.setJsonEntity("{ \"settings\": " + Strings.toString(settings) + ", \"mappings\" : {" + mapping + "} }"); + assertOK(adminClient().performRequest(request)); + } + + protected static boolean indexExists(String index) throws IOException { + Response response = adminClient().performRequest(new Request("HEAD", "/" + index)); + return RestStatus.OK.getStatus() == 
response.getStatusLine().getStatusCode(); + } + + protected RestClient buildLeaderClient() throws IOException { + assert "leader".equals(targetCluster) == false; + return buildClient(System.getProperty("tests.leader_host")); + } + + protected RestClient buildMiddleClient() throws IOException { + assert "middle".equals(targetCluster) == false; + return buildClient(System.getProperty("tests.middle_host")); + } + + private RestClient buildClient(final String url) throws IOException { + int portSeparator = url.lastIndexOf(':'); + HttpHost httpHost = new HttpHost(url.substring(0, portSeparator), + Integer.parseInt(url.substring(portSeparator + 1)), getProtocol()); + return buildClient(restAdminSettings(), new HttpHost[]{httpHost}); + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java index 1c1cade2484a7..68a6310dcaa97 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java @@ -97,6 +97,8 @@ public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, E public static final String CCR_CUSTOM_METADATA_KEY = "ccr"; public static final String CCR_CUSTOM_METADATA_LEADER_INDEX_SHARD_HISTORY_UUIDS = "leader_index_shard_history_uuids"; public static final String CCR_CUSTOM_METADATA_LEADER_INDEX_UUID_KEY = "leader_index_uuid"; + public static final String CCR_CUSTOM_METADATA_LEADER_INDEX_NAME_KEY = "leader_index_name"; + public static final String CCR_CUSTOM_METADATA_REMOTE_CLUSTER_NAME_KEY = "remote_cluster_name"; private final boolean enabled; private final Settings settings; diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java index a18ec3bf6c42e..b32ed829cf42c 100644 --- 
a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.ccr.CcrLicenseChecker; @@ -159,7 +160,7 @@ private void doAutoFollow() { @Override void getLeaderClusterState(final Map headers, - final String leaderClusterAlias, + final String remoteCluster, final BiConsumer handler) { final ClusterStateRequest request = new ClusterStateRequest(); request.clear(); @@ -168,7 +169,7 @@ void getLeaderClusterState(final Map headers, ccrLicenseChecker.checkRemoteClusterLicenseAndFetchClusterState( client, headers, - leaderClusterAlias, + remoteCluster, request, e -> handler.accept(null, e), leaderClusterState -> handler.accept(leaderClusterState, null)); @@ -176,11 +177,10 @@ void getLeaderClusterState(final Map headers, @Override void createAndFollow(Map headers, - ResumeFollowAction.Request followRequest, + PutFollowAction.Request request, Runnable successHandler, Consumer failureHandler) { Client followerClient = CcrLicenseChecker.wrapClient(client, headers); - PutFollowAction.Request request = new PutFollowAction.Request(followRequest); followerClient.execute( PutFollowAction.INSTANCE, request, @@ -246,14 +246,14 @@ void autoFollowIndices() { final int slot = i; final String autoFollowPattenName = entry.getKey(); final AutoFollowPattern autoFollowPattern = entry.getValue(); - final String leaderCluster = autoFollowPattern.getLeaderCluster(); + final String remoteCluster = autoFollowPattern.getRemoteCluster(); Map headers = autoFollowMetadata.getHeaders().get(autoFollowPattenName); - 
getLeaderClusterState(headers, leaderCluster, (leaderClusterState, e) -> { + getLeaderClusterState(headers, remoteCluster, (leaderClusterState, e) -> { if (leaderClusterState != null) { assert e == null; final List followedIndices = autoFollowMetadata.getFollowedLeaderIndexUUIDs().get(autoFollowPattenName); - final List leaderIndicesToFollow = getLeaderIndicesToFollow(leaderCluster, autoFollowPattern, + final List leaderIndicesToFollow = getLeaderIndicesToFollow(remoteCluster, autoFollowPattern, leaderClusterState, followerClusterState, followedIndices); if (leaderIndicesToFollow.isEmpty()) { finalise(slot, new AutoFollowResult(autoFollowPattenName)); @@ -261,12 +261,12 @@ void autoFollowIndices() { List> patternsForTheSameLeaderCluster = autoFollowMetadata.getPatterns() .entrySet().stream() .filter(item -> autoFollowPattenName.equals(item.getKey()) == false) - .filter(item -> leaderCluster.equals(item.getValue().getLeaderCluster())) + .filter(item -> remoteCluster.equals(item.getValue().getRemoteCluster())) .map(item -> new Tuple<>(item.getKey(), item.getValue())) .collect(Collectors.toList()); Consumer resultHandler = result -> finalise(slot, result); - checkAutoFollowPattern(autoFollowPattenName, leaderCluster, autoFollowPattern, leaderIndicesToFollow, headers, + checkAutoFollowPattern(autoFollowPattenName, remoteCluster, autoFollowPattern, leaderIndicesToFollow, headers, patternsForTheSameLeaderCluster, resultHandler); } } else { @@ -278,7 +278,7 @@ void autoFollowIndices() { } private void checkAutoFollowPattern(String autoFollowPattenName, - String clusterAlias, + String leaderCluster, AutoFollowPattern autoFollowPattern, List leaderIndicesToFollow, Map headers, @@ -302,7 +302,7 @@ private void checkAutoFollowPattern(String autoFollowPattenName, resultHandler.accept(new AutoFollowResult(autoFollowPattenName, results.asList())); } } else { - followLeaderIndex(autoFollowPattenName, clusterAlias, indexToFollow, autoFollowPattern, headers, error -> { + 
followLeaderIndex(autoFollowPattenName, leaderCluster, indexToFollow, autoFollowPattern, headers, error -> { results.set(slot, new Tuple<>(indexToFollow, error)); if (leaderIndicesCountDown.countDown()) { resultHandler.accept(new AutoFollowResult(autoFollowPattenName, results.asList())); @@ -314,7 +314,7 @@ private void checkAutoFollowPattern(String autoFollowPattenName, } private void followLeaderIndex(String autoFollowPattenName, - String clusterAlias, + String remoteCluster, Index indexToFollow, AutoFollowPattern pattern, Map headers, @@ -322,17 +322,23 @@ private void followLeaderIndex(String autoFollowPattenName, final String leaderIndexName = indexToFollow.getName(); final String followIndexName = getFollowerIndexName(pattern, leaderIndexName); - ResumeFollowAction.Request request = new ResumeFollowAction.Request(); - request.setLeaderCluster(clusterAlias); + ResumeFollowAction.Request followRequest = new ResumeFollowAction.Request(); + followRequest.setFollowerIndex(followIndexName); + followRequest.setMaxReadRequestOperationCount(pattern.getMaxReadRequestOperationCount()); + followRequest.setMaxReadRequestSize(pattern.getMaxReadRequestSize()); + followRequest.setMaxOutstandingReadRequests(pattern.getMaxOutstandingReadRequests()); + followRequest.setMaxWriteRequestOperationCount(pattern.getMaxWriteRequestOperationCount()); + followRequest.setMaxWriteRequestSize(pattern.getMaxWriteRequestSize()); + followRequest.setMaxOutstandingWriteRequests(pattern.getMaxOutstandingWriteRequests()); + followRequest.setMaxWriteBufferCount(pattern.getMaxWriteBufferCount()); + followRequest.setMaxWriteBufferSize(pattern.getMaxWriteBufferSize()); + followRequest.setMaxRetryDelay(pattern.getMaxRetryDelay()); + followRequest.setReadPollTimeout(pattern.getPollTimeout()); + + PutFollowAction.Request request = new PutFollowAction.Request(); + request.setRemoteCluster(remoteCluster); request.setLeaderIndex(indexToFollow.getName()); - request.setFollowerIndex(followIndexName); - 
request.setMaxBatchOperationCount(pattern.getMaxBatchOperationCount()); - request.setMaxConcurrentReadBatches(pattern.getMaxConcurrentReadBatches()); - request.setMaxBatchSize(pattern.getMaxBatchSize()); - request.setMaxConcurrentWriteBatches(pattern.getMaxConcurrentWriteBatches()); - request.setMaxWriteBufferSize(pattern.getMaxWriteBufferSize()); - request.setMaxRetryDelay(pattern.getMaxRetryDelay()); - request.setPollTimeout(pattern.getPollTimeout()); + request.setFollowRequest(followRequest); // Execute if the create and follow api call succeeds: Runnable successHandler = () -> { @@ -355,7 +361,7 @@ private void finalise(int slot, AutoFollowResult result) { } } - static List getLeaderIndicesToFollow(String clusterAlias, + static List getLeaderIndicesToFollow(String remoteCluster, AutoFollowPattern autoFollowPattern, ClusterState leaderClusterState, ClusterState followerClusterState, @@ -368,7 +374,9 @@ static List getLeaderIndicesToFollow(String clusterAlias, // has a leader index uuid custom metadata entry that matches with uuid of leaderIndexMetaData variable // If so then handle it differently: not follow it, but just add an entry to // AutoFollowMetadata#followedLeaderIndexUUIDs - leaderIndicesToFollow.add(leaderIndexMetaData.getIndex()); + if (leaderIndexMetaData.getSettings().getAsBoolean(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false)) { + leaderIndicesToFollow.add(leaderIndexMetaData.getIndex()); + } } } } @@ -407,18 +415,18 @@ static Function recordLeaderIndexAsFollowFunction(St * Fetch the cluster state from the leader with the specified cluster alias * * @param headers the client headers - * @param leaderClusterAlias the cluster alias of the leader + * @param remoteCluster the name of the leader cluster * @param handler the callback to invoke */ abstract void getLeaderClusterState( Map headers, - String leaderClusterAlias, + String remoteCluster, BiConsumer handler ); abstract void createAndFollow( Map headers, - ResumeFollowAction.Request 
followRequest, + PutFollowAction.Request followRequest, Runnable successHandler, Consumer failureHandler ); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ResponseHandler.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ResponseHandler.java new file mode 100644 index 0000000000000..6d28de050234c --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ResponseHandler.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.master.AcknowledgedResponse; + +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReferenceArray; + +final class ResponseHandler { + + private final AtomicInteger counter; + private final AtomicReferenceArray responses; + private final ActionListener listener; + + ResponseHandler(int numRequests, ActionListener listener) { + this.counter = new AtomicInteger(numRequests); + this.responses = new AtomicReferenceArray<>(numRequests); + this.listener = listener; + } + + ActionListener getActionListener(final int requestId) { + return new ActionListener() { + + @Override + public void onResponse(T response) { + responses.set(requestId, response); + finalizeResponse(); + } + + @Override + public void onFailure(Exception e) { + responses.set(requestId, e); + finalizeResponse(); + } + }; + } + + private void finalizeResponse() { + Exception error = null; + if (counter.decrementAndGet() == 0) { + for (int j = 0; j < responses.length(); j++) { + Object response = responses.get(j); + if (response instanceof Exception) { + if (error == null) { + error = (Exception) response; + } else { + 
error.addSuppressed((Exception) response); + } + } + } + + if (error == null) { + listener.onResponse(new AcknowledgedResponse(true)); + } else { + listener.onFailure(error); + } + } + } +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java index 611d31978692c..cf54a236a0451 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java @@ -65,8 +65,8 @@ public static class Request extends SingleShardRequest { private int maxOperationCount; private ShardId shardId; private String expectedHistoryUUID; - private TimeValue pollTimeout = TransportResumeFollowAction.DEFAULT_POLL_TIMEOUT; - private ByteSizeValue maxBatchSize = TransportResumeFollowAction.DEFAULT_MAX_BATCH_SIZE; + private TimeValue pollTimeout = TransportResumeFollowAction.DEFAULT_READ_POLL_TIMEOUT; + private ByteSizeValue maxBatchSize = TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE; private long relativeStartNanos; diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java index b156a41896a10..8c302344ad86d 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java @@ -28,6 +28,8 @@ import org.elasticsearch.indices.IndexClosedException; import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.transport.NodeDisconnectedException; +import org.elasticsearch.transport.NodeNotConnectedException; import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsResponse; 
import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; @@ -68,21 +70,22 @@ public abstract class ShardFollowNodeTask extends AllocatedPersistentTask { private long lastRequestedSeqNo; private long followerGlobalCheckpoint = 0; private long followerMaxSeqNo = 0; - private int numConcurrentReads = 0; - private int numConcurrentWrites = 0; + private int numOutstandingReads = 0; + private int numOutstandingWrites = 0; private long currentMappingVersion = 0; - private long totalFetchTookTimeMillis = 0; - private long totalFetchTimeMillis = 0; - private long numberOfSuccessfulFetches = 0; - private long numberOfFailedFetches = 0; - private long operationsReceived = 0; - private long totalTransferredBytes = 0; - private long totalIndexTimeMillis = 0; - private long numberOfSuccessfulBulkOperations = 0; - private long numberOfFailedBulkOperations = 0; - private long numberOfOperationsIndexed = 0; + private long totalReadRemoteExecTimeMillis = 0; + private long totalReadTimeMillis = 0; + private long successfulReadRequests = 0; + private long failedReadRequests = 0; + private long operationsRead = 0; + private long bytesRead = 0; + private long totalWriteTimeMillis = 0; + private long successfulWriteRequests = 0; + private long failedWriteRequests = 0; + private long operationWritten = 0; private long lastFetchTime = -1; private final Queue buffer = new PriorityQueue<>(Comparator.comparing(Translog.Operation::seqNo)); + private long bufferSizeInBytes = 0; private final LinkedHashMap> fetchExceptions; private volatile ElasticsearchException fatalException; @@ -101,7 +104,7 @@ public abstract class ShardFollowNodeTask extends AllocatedPersistentTask { this.fetchExceptions = new LinkedHashMap>() { @Override protected boolean removeEldestEntry(final Map.Entry> eldest) { - return size() > params.getMaxConcurrentReadBatches(); + return size() > params.getMaxOutstandingReadRequests(); } }; } @@ -126,12 +129,12 @@ void start( } // updates follower mapping, this gets us 
the leader mapping version and makes sure that leader and follower mapping are identical - updateMapping(mappingVersion -> { + updateMapping(followerMappingVersion -> { synchronized (ShardFollowNodeTask.this) { - currentMappingVersion = mappingVersion; + currentMappingVersion = followerMappingVersion; } - LOGGER.info("{} Started to follow leader shard {}, followGlobalCheckPoint={}, mappingVersion={}", - params.getFollowShardId(), params.getLeaderShardId(), followerGlobalCheckpoint, mappingVersion); + LOGGER.info("{} Started to follow leader shard {}, followGlobalCheckPoint={}, followerMappingVersion={}", + params.getFollowShardId(), params.getLeaderShardId(), followerGlobalCheckpoint, followerMappingVersion); coordinateReads(); }); } @@ -144,45 +147,49 @@ synchronized void coordinateReads() { LOGGER.trace("{} coordinate reads, lastRequestedSeqNo={}, leaderGlobalCheckpoint={}", params.getFollowShardId(), lastRequestedSeqNo, leaderGlobalCheckpoint); - final int maxBatchOperationCount = params.getMaxBatchOperationCount(); + final int maxReadRequestOperationCount = params.getMaxReadRequestOperationCount(); while (hasReadBudget() && lastRequestedSeqNo < leaderGlobalCheckpoint) { final long from = lastRequestedSeqNo + 1; - final long maxRequiredSeqNo = Math.min(leaderGlobalCheckpoint, from + maxBatchOperationCount - 1); - final int requestBatchCount; - if (numConcurrentReads == 0) { + final long maxRequiredSeqNo = Math.min(leaderGlobalCheckpoint, from + maxReadRequestOperationCount - 1); + final int requestOpCount; + if (numOutstandingReads == 0) { // This is the only request, we can optimistically fetch more documents if possible but not enforce max_required_seqno. 
- requestBatchCount = maxBatchOperationCount; + requestOpCount = maxReadRequestOperationCount; } else { - requestBatchCount = Math.toIntExact(maxRequiredSeqNo - from + 1); + requestOpCount = Math.toIntExact(maxRequiredSeqNo - from + 1); } - assert 0 < requestBatchCount && requestBatchCount <= maxBatchOperationCount : "request_batch_count=" + requestBatchCount; + assert 0 < requestOpCount && requestOpCount <= maxReadRequestOperationCount : "read_request_operation_count=" + requestOpCount; LOGGER.trace("{}[{} ongoing reads] read from_seqno={} max_required_seqno={} batch_count={}", - params.getFollowShardId(), numConcurrentReads, from, maxRequiredSeqNo, requestBatchCount); - numConcurrentReads++; - sendShardChangesRequest(from, requestBatchCount, maxRequiredSeqNo); + params.getFollowShardId(), numOutstandingReads, from, maxRequiredSeqNo, requestOpCount); + numOutstandingReads++; + sendShardChangesRequest(from, requestOpCount, maxRequiredSeqNo); lastRequestedSeqNo = maxRequiredSeqNo; } - if (numConcurrentReads == 0 && hasReadBudget()) { + if (numOutstandingReads == 0 && hasReadBudget()) { assert lastRequestedSeqNo == leaderGlobalCheckpoint; // We sneak peek if there is any thing new in the leader. 
// If there is we will happily accept - numConcurrentReads++; + numOutstandingReads++; long from = lastRequestedSeqNo + 1; - LOGGER.trace("{}[{}] peek read [{}]", params.getFollowShardId(), numConcurrentReads, from); - sendShardChangesRequest(from, maxBatchOperationCount, lastRequestedSeqNo); + LOGGER.trace("{}[{}] peek read [{}]", params.getFollowShardId(), numOutstandingReads, from); + sendShardChangesRequest(from, maxReadRequestOperationCount, lastRequestedSeqNo); } } private boolean hasReadBudget() { assert Thread.holdsLock(this); - if (numConcurrentReads >= params.getMaxConcurrentReadBatches()) { + if (numOutstandingReads >= params.getMaxOutstandingReadRequests()) { LOGGER.trace("{} no new reads, maximum number of concurrent reads have been reached [{}]", - params.getFollowShardId(), numConcurrentReads); + params.getFollowShardId(), numOutstandingReads); + return false; + } + if (bufferSizeInBytes >= params.getMaxWriteBufferSize().getBytes()) { + LOGGER.trace("{} no new reads, buffer size limit has been reached [{}]", params.getFollowShardId(), bufferSizeInBytes); return false; } - if (buffer.size() > params.getMaxWriteBufferSize()) { - LOGGER.trace("{} no new reads, buffer limit has been reached [{}]", params.getFollowShardId(), buffer.size()); + if (buffer.size() > params.getMaxWriteBufferCount()) { + LOGGER.trace("{} no new reads, buffer count limit has been reached [{}]", params.getFollowShardId(), buffer.size()); return false; } return true; @@ -196,18 +203,19 @@ private synchronized void coordinateWrites() { while (hasWriteBudget() && buffer.isEmpty() == false) { long sumEstimatedSize = 0L; - int length = Math.min(params.getMaxBatchOperationCount(), buffer.size()); + int length = Math.min(params.getMaxWriteRequestOperationCount(), buffer.size()); List ops = new ArrayList<>(length); for (int i = 0; i < length; i++) { Translog.Operation op = buffer.remove(); ops.add(op); sumEstimatedSize += op.estimateSize(); - if (sumEstimatedSize > 
params.getMaxBatchSize().getBytes()) { + if (sumEstimatedSize > params.getMaxWriteRequestSize().getBytes()) { break; } } - numConcurrentWrites++; - LOGGER.trace("{}[{}] write [{}/{}] [{}]", params.getFollowShardId(), numConcurrentWrites, ops.get(0).seqNo(), + bufferSizeInBytes -= sumEstimatedSize; + numOutstandingWrites++; + LOGGER.trace("{}[{}] write [{}/{}] [{}]", params.getFollowShardId(), numOutstandingWrites, ops.get(0).seqNo(), ops.get(ops.size() - 1).seqNo(), ops.size()); sendBulkShardOperationsRequest(ops, leaderMaxSeqNoOfUpdatesOrDeletes, new AtomicInteger(0)); } @@ -215,9 +223,9 @@ private synchronized void coordinateWrites() { private boolean hasWriteBudget() { assert Thread.holdsLock(this); - if (numConcurrentWrites >= params.getMaxConcurrentWriteBatches()) { + if (numOutstandingWrites >= params.getMaxOutstandingWriteRequests()) { LOGGER.trace("{} maximum number of concurrent writes have been reached [{}]", - params.getFollowShardId(), numConcurrentWrites); + params.getFollowShardId(), numOutstandingWrites); return false; } return true; @@ -239,11 +247,11 @@ private void sendShardChangesRequest(long from, int maxOperationCount, long maxR fetchExceptions.remove(from); if (response.getOperations().length > 0) { // do not count polls against fetch stats - totalFetchTookTimeMillis += response.getTookInMillis(); - totalFetchTimeMillis += TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTime); - numberOfSuccessfulFetches++; - operationsReceived += response.getOperations().length; - totalTransferredBytes += + totalReadRemoteExecTimeMillis += response.getTookInMillis(); + totalReadTimeMillis += TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTime); + successfulReadRequests++; + operationsRead += response.getOperations().length; + bytesRead += Arrays.stream(response.getOperations()).mapToLong(Translog.Operation::estimateSize).sum(); } } @@ -251,8 +259,8 @@ private void sendShardChangesRequest(long from, int 
maxOperationCount, long maxR }, e -> { synchronized (ShardFollowNodeTask.this) { - totalFetchTimeMillis += TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTime); - numberOfFailedFetches++; + totalReadTimeMillis += TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTime); + failedReadRequests++; fetchExceptions.put(from, Tuple.tuple(retryCounter, ExceptionsHelper.convertToElastic(e))); } handleFailure(e, retryCounter, () -> sendShardChangesRequest(from, maxOperationCount, maxRequiredSeqNo, retryCounter)); @@ -279,7 +287,12 @@ synchronized void innerHandleReadResponse(long from, long maxRequiredSeqNo, Shar } else { assert response.getOperations()[0].seqNo() == from : "first operation is not what we asked for. From is [" + from + "], got " + response.getOperations()[0]; - buffer.addAll(Arrays.asList(response.getOperations())); + List operations = Arrays.asList(response.getOperations()); + long operationsSize = operations.stream() + .mapToLong(Translog.Operation::estimateSize) + .sum(); + buffer.addAll(operations); + bufferSizeInBytes += operationsSize; final long maxSeqNo = response.getOperations()[response.getOperations().length - 1].seqNo(); assert maxSeqNo == Arrays.stream(response.getOperations()).mapToLong(Translog.Operation::seqNo).max().getAsLong(); @@ -297,7 +310,7 @@ synchronized void innerHandleReadResponse(long from, long maxRequiredSeqNo, Shar sendShardChangesRequest(newFromSeqNo, newSize, maxRequiredSeqNo); } else { // read is completed, decrement - numConcurrentReads--; + numOutstandingReads--; coordinateReads(); } } @@ -309,16 +322,16 @@ private void sendBulkShardOperationsRequest(List operations, innerSendBulkShardOperationsRequest(followerHistoryUUID, operations, leaderMaxSeqNoOfUpdatesOrDeletes, response -> { synchronized (ShardFollowNodeTask.this) { - totalIndexTimeMillis += TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTime); - numberOfSuccessfulBulkOperations++; - 
numberOfOperationsIndexed += operations.size(); + totalWriteTimeMillis += TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTime); + successfulWriteRequests++; + operationWritten += operations.size(); } handleWriteResponse(response); }, e -> { synchronized (ShardFollowNodeTask.this) { - totalIndexTimeMillis += TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTime); - numberOfFailedBulkOperations++; + totalWriteTimeMillis += TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTime); + failedWriteRequests++; } handleFailure(e, retryCounter, () -> sendBulkShardOperationsRequest(operations, leaderMaxSeqNoOfUpdatesOrDeletes, retryCounter)); @@ -329,8 +342,8 @@ private void sendBulkShardOperationsRequest(List operations, private synchronized void handleWriteResponse(final BulkShardOperationsResponse response) { this.followerGlobalCheckpoint = Math.max(this.followerGlobalCheckpoint, response.getGlobalCheckpoint()); this.followerMaxSeqNo = Math.max(this.followerMaxSeqNo, response.getMaxSeqNo()); - numConcurrentWrites--; - assert numConcurrentWrites >= 0; + numOutstandingWrites--; + assert numOutstandingWrites >= 0; coordinateWrites(); // In case that buffer has more ops than is allowed then reads may all have been stopped, @@ -367,10 +380,11 @@ private void handleFailure(Exception e, AtomicInteger retryCounter, Runnable tas int currentRetry = retryCounter.incrementAndGet(); LOGGER.debug(new ParameterizedMessage("{} error during follow shard task, retrying [{}]", params.getFollowShardId(), currentRetry), e); - long delay = computeDelay(currentRetry, params.getPollTimeout().getMillis()); + long delay = computeDelay(currentRetry, params.getReadPollTimeout().getMillis()); scheduler.accept(TimeValue.timeValueMillis(delay), task); } else { fatalException = ExceptionsHelper.convertToElastic(e); + LOGGER.warn("shard follow task encounter non-retryable error", e); } } @@ -399,7 +413,10 @@ static boolean 
shouldRetry(Exception e) { actual instanceof AlreadyClosedException || actual instanceof ElasticsearchSecurityException || // If user does not have sufficient privileges actual instanceof ClusterBlockException || // If leader index is closed or no elected master - actual instanceof IndexClosedException; // If follow index is closed + actual instanceof IndexClosedException || // If follow index is closed + actual instanceof NodeDisconnectedException || + actual instanceof NodeNotConnectedException || + (actual.getMessage() != null && actual.getMessage().contains("TransportService is closed")); } // These methods are protected for testing purposes: @@ -437,7 +454,7 @@ public synchronized ShardFollowNodeTaskStatus getStatus() { timeSinceLastFetchMillis = -1; } return new ShardFollowNodeTaskStatus( - params.getLeaderCluster(), + params.getRemoteCluster(), params.getLeaderShardId().getIndexName(), params.getFollowShardId().getIndexName(), getFollowShardId().getId(), @@ -446,20 +463,21 @@ public synchronized ShardFollowNodeTaskStatus getStatus() { followerGlobalCheckpoint, followerMaxSeqNo, lastRequestedSeqNo, - numConcurrentReads, - numConcurrentWrites, + numOutstandingReads, + numOutstandingWrites, buffer.size(), + bufferSizeInBytes, currentMappingVersion, - totalFetchTimeMillis, - totalFetchTookTimeMillis, - numberOfSuccessfulFetches, - numberOfFailedFetches, - operationsReceived, - totalTransferredBytes, - totalIndexTimeMillis, - numberOfSuccessfulBulkOperations, - numberOfFailedBulkOperations, - numberOfOperationsIndexed, + totalReadTimeMillis, + totalReadRemoteExecTimeMillis, + successfulReadRequests, + failedReadRequests, + operationsRead, + bytesRead, + totalWriteTimeMillis, + successfulWriteRequests, + failedWriteRequests, + operationWritten, new TreeMap<>( fetchExceptions .entrySet() diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java 
b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java index 2dc08de8034be..f22fe0d2238ac 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java @@ -36,7 +36,7 @@ public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { public static final Set HEADER_FILTERS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList("es-security-runas-user", "_xpack_security_authentication"))); - static final ParseField LEADER_CLUSTER_FIELD = new ParseField("leader_cluster"); + static final ParseField REMOTE_CLUSTER_FIELD = new ParseField("remote_cluster"); static final ParseField FOLLOW_SHARD_INDEX_FIELD = new ParseField("follow_shard_index"); static final ParseField FOLLOW_SHARD_INDEX_UUID_FIELD = new ParseField("follow_shard_index_uuid"); static final ParseField FOLLOW_SHARD_SHARDID_FIELD = new ParseField("follow_shard_shard"); @@ -44,99 +44,124 @@ public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { static final ParseField LEADER_SHARD_INDEX_UUID_FIELD = new ParseField("leader_shard_index_uuid"); static final ParseField LEADER_SHARD_SHARDID_FIELD = new ParseField("leader_shard_shard"); static final ParseField HEADERS = new ParseField("headers"); - public static final ParseField MAX_BATCH_OPERATION_COUNT = new ParseField("max_batch_operation_count"); - public static final ParseField MAX_CONCURRENT_READ_BATCHES = new ParseField("max_concurrent_read_batches"); - public static final ParseField MAX_BATCH_SIZE = new ParseField("max_batch_size"); - public static final ParseField MAX_CONCURRENT_WRITE_BATCHES = new ParseField("max_concurrent_write_batches"); + public static final ParseField MAX_READ_REQUEST_OPERATION_COUNT = new ParseField("max_read_request_operation_count"); + public static final ParseField MAX_READ_REQUEST_SIZE = new 
ParseField("max_read_request_size"); + public static final ParseField MAX_OUTSTANDING_READ_REQUESTS = new ParseField("max_outstanding_read_requests"); + public static final ParseField MAX_WRITE_REQUEST_OPERATION_COUNT = new ParseField("max_write_request_operation_count"); + public static final ParseField MAX_WRITE_REQUEST_SIZE = new ParseField("max_write_request_size"); + public static final ParseField MAX_OUTSTANDING_WRITE_REQUESTS = new ParseField("max_outstanding_write_requests"); + public static final ParseField MAX_WRITE_BUFFER_COUNT = new ParseField("max_write_buffer_count"); public static final ParseField MAX_WRITE_BUFFER_SIZE = new ParseField("max_write_buffer_size"); public static final ParseField MAX_RETRY_DELAY = new ParseField("max_retry_delay"); - public static final ParseField POLL_TIMEOUT = new ParseField("poll_timeout"); + public static final ParseField READ_POLL_TIMEOUT = new ParseField("read_poll_timeout"); @SuppressWarnings("unchecked") private static ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, - (a) -> new ShardFollowTask((String) a[0], new ShardId((String) a[1], (String) a[2], (int) a[3]), - new ShardId((String) a[4], (String) a[5], (int) a[6]), (int) a[7], (int) a[8], (ByteSizeValue) a[9], - (int) a[10], (int) a[11], (TimeValue) a[12], (TimeValue) a[13], (Map) a[14])); + (a) -> new ShardFollowTask((String) a[0], + new ShardId((String) a[1], (String) a[2], (int) a[3]), new ShardId((String) a[4], (String) a[5], (int) a[6]), + (int) a[7], (ByteSizeValue) a[8], (int) a[9], (int) a[10], (ByteSizeValue) a[11], (int) a[12], + (int) a[13], (ByteSizeValue) a[14], (TimeValue) a[15], (TimeValue) a[16], (Map) a[17])); static { - PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), LEADER_CLUSTER_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), REMOTE_CLUSTER_FIELD); PARSER.declareString(ConstructingObjectParser.constructorArg(), FOLLOW_SHARD_INDEX_FIELD); 
PARSER.declareString(ConstructingObjectParser.constructorArg(), FOLLOW_SHARD_INDEX_UUID_FIELD); PARSER.declareInt(ConstructingObjectParser.constructorArg(), FOLLOW_SHARD_SHARDID_FIELD); PARSER.declareString(ConstructingObjectParser.constructorArg(), LEADER_SHARD_INDEX_FIELD); PARSER.declareString(ConstructingObjectParser.constructorArg(), LEADER_SHARD_INDEX_UUID_FIELD); PARSER.declareInt(ConstructingObjectParser.constructorArg(), LEADER_SHARD_SHARDID_FIELD); - PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_BATCH_OPERATION_COUNT); - PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_CONCURRENT_READ_BATCHES); + PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_READ_REQUEST_OPERATION_COUNT); PARSER.declareField( ConstructingObjectParser.constructorArg(), - (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_BATCH_SIZE.getPreferredName()), - MAX_BATCH_SIZE, + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_READ_REQUEST_SIZE.getPreferredName()), MAX_READ_REQUEST_SIZE, ObjectParser.ValueType.STRING); - PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_CONCURRENT_WRITE_BATCHES); - PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_WRITE_BUFFER_SIZE); + PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_OUTSTANDING_READ_REQUESTS); + PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_WRITE_REQUEST_OPERATION_COUNT); + PARSER.declareField( + ConstructingObjectParser.constructorArg(), + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_WRITE_BUFFER_SIZE.getPreferredName()), MAX_WRITE_REQUEST_SIZE, + ObjectParser.ValueType.STRING); + PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_OUTSTANDING_WRITE_REQUESTS); + PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_WRITE_BUFFER_COUNT); + PARSER.declareField( + ConstructingObjectParser.constructorArg(), + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), 
MAX_WRITE_BUFFER_SIZE.getPreferredName()), + MAX_WRITE_BUFFER_SIZE, + ObjectParser.ValueType.STRING); PARSER.declareField(ConstructingObjectParser.constructorArg(), (p, c) -> TimeValue.parseTimeValue(p.text(), MAX_RETRY_DELAY.getPreferredName()), MAX_RETRY_DELAY, ObjectParser.ValueType.STRING); PARSER.declareField(ConstructingObjectParser.constructorArg(), - (p, c) -> TimeValue.parseTimeValue(p.text(), POLL_TIMEOUT.getPreferredName()), - POLL_TIMEOUT, ObjectParser.ValueType.STRING); + (p, c) -> TimeValue.parseTimeValue(p.text(), READ_POLL_TIMEOUT.getPreferredName()), + READ_POLL_TIMEOUT, ObjectParser.ValueType.STRING); PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> p.mapStrings(), HEADERS); } - private final String leaderCluster; + private final String remoteCluster; private final ShardId followShardId; private final ShardId leaderShardId; - private final int maxBatchOperationCount; - private final int maxConcurrentReadBatches; - private final ByteSizeValue maxBatchSize; - private final int maxConcurrentWriteBatches; - private final int maxWriteBufferSize; + private final int maxReadRequestOperationCount; + private final ByteSizeValue maxReadRequestSize; + private final int maxOutstandingReadRequests; + private final int maxWriteRequestOperationCount; + private final ByteSizeValue maxWriteRequestSize; + private final int maxOutstandingWriteRequests; + private final int maxWriteBufferCount; + private final ByteSizeValue maxWriteBufferSize; private final TimeValue maxRetryDelay; - private final TimeValue pollTimeout; + private final TimeValue readPollTimeout; private final Map headers; ShardFollowTask( - final String leaderCluster, + final String remoteCluster, final ShardId followShardId, final ShardId leaderShardId, - final int maxBatchOperationCount, - final int maxConcurrentReadBatches, - final ByteSizeValue maxBatchSize, - final int maxConcurrentWriteBatches, - final int maxWriteBufferSize, + final int maxReadRequestOperationCount, + 
final ByteSizeValue maxReadRequestSize, + final int maxOutstandingReadRequests, + final int maxWriteRequestOperationCount, + final ByteSizeValue maxWriteRequestSize, + final int maxOutstandingWriteRequests, + final int maxWriteBufferCount, + final ByteSizeValue maxWriteBufferSize, final TimeValue maxRetryDelay, - final TimeValue pollTimeout, + final TimeValue readPollTimeout, final Map headers) { - this.leaderCluster = leaderCluster; + this.remoteCluster = remoteCluster; this.followShardId = followShardId; this.leaderShardId = leaderShardId; - this.maxBatchOperationCount = maxBatchOperationCount; - this.maxConcurrentReadBatches = maxConcurrentReadBatches; - this.maxBatchSize = maxBatchSize; - this.maxConcurrentWriteBatches = maxConcurrentWriteBatches; + this.maxReadRequestOperationCount = maxReadRequestOperationCount; + this.maxReadRequestSize = maxReadRequestSize; + this.maxOutstandingReadRequests = maxOutstandingReadRequests; + this.maxWriteRequestOperationCount = maxWriteRequestOperationCount; + this.maxWriteRequestSize = maxWriteRequestSize; + this.maxOutstandingWriteRequests = maxOutstandingWriteRequests; + this.maxWriteBufferCount = maxWriteBufferCount; this.maxWriteBufferSize = maxWriteBufferSize; this.maxRetryDelay = maxRetryDelay; - this.pollTimeout = pollTimeout; + this.readPollTimeout = readPollTimeout; this.headers = headers != null ? 
Collections.unmodifiableMap(headers) : Collections.emptyMap(); } public ShardFollowTask(StreamInput in) throws IOException { - this.leaderCluster = in.readOptionalString(); + this.remoteCluster = in.readString(); this.followShardId = ShardId.readShardId(in); this.leaderShardId = ShardId.readShardId(in); - this.maxBatchOperationCount = in.readVInt(); - this.maxConcurrentReadBatches = in.readVInt(); - this.maxBatchSize = new ByteSizeValue(in); - this.maxConcurrentWriteBatches = in.readVInt(); - this.maxWriteBufferSize = in.readVInt(); + this.maxReadRequestOperationCount = in.readVInt(); + this.maxReadRequestSize = new ByteSizeValue(in); + this.maxOutstandingReadRequests = in.readVInt(); + this.maxWriteRequestOperationCount = in.readVInt(); + this.maxWriteRequestSize = new ByteSizeValue(in); + this.maxOutstandingWriteRequests = in.readVInt(); + this.maxWriteBufferCount = in.readVInt(); + this.maxWriteBufferSize = new ByteSizeValue(in); this.maxRetryDelay = in.readTimeValue(); - this.pollTimeout = in.readTimeValue(); + this.readPollTimeout = in.readTimeValue(); this.headers = Collections.unmodifiableMap(in.readMap(StreamInput::readString, StreamInput::readString)); } - public String getLeaderCluster() { - return leaderCluster; + public String getRemoteCluster() { + return remoteCluster; } public ShardId getFollowShardId() { @@ -147,32 +172,44 @@ public ShardId getLeaderShardId() { return leaderShardId; } - public int getMaxBatchOperationCount() { - return maxBatchOperationCount; + public int getMaxReadRequestOperationCount() { + return maxReadRequestOperationCount; + } + + public int getMaxOutstandingReadRequests() { + return maxOutstandingReadRequests; + } + + public int getMaxWriteRequestOperationCount() { + return maxWriteRequestOperationCount; + } + + public ByteSizeValue getMaxWriteRequestSize() { + return maxWriteRequestSize; } - public int getMaxConcurrentReadBatches() { - return maxConcurrentReadBatches; + public int getMaxOutstandingWriteRequests() { + return 
maxOutstandingWriteRequests; } - public int getMaxConcurrentWriteBatches() { - return maxConcurrentWriteBatches; + public int getMaxWriteBufferCount() { + return maxWriteBufferCount; } - public int getMaxWriteBufferSize() { + public ByteSizeValue getMaxWriteBufferSize() { return maxWriteBufferSize; } - public ByteSizeValue getMaxBatchSize() { - return maxBatchSize; + public ByteSizeValue getMaxReadRequestSize() { + return maxReadRequestSize; } public TimeValue getMaxRetryDelay() { return maxRetryDelay; } - public TimeValue getPollTimeout() { - return pollTimeout; + public TimeValue getReadPollTimeout() { + return readPollTimeout; } public String getTaskId() { @@ -190,16 +227,19 @@ public String getWriteableName() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeOptionalString(leaderCluster); + out.writeString(remoteCluster); followShardId.writeTo(out); leaderShardId.writeTo(out); - out.writeVLong(maxBatchOperationCount); - out.writeVInt(maxConcurrentReadBatches); - maxBatchSize.writeTo(out); - out.writeVInt(maxConcurrentWriteBatches); - out.writeVInt(maxWriteBufferSize); + out.writeVLong(maxReadRequestOperationCount); + maxReadRequestSize.writeTo(out); + out.writeVInt(maxOutstandingReadRequests); + out.writeVLong(maxWriteRequestOperationCount); + maxWriteRequestSize.writeTo(out); + out.writeVInt(maxOutstandingWriteRequests); + out.writeVInt(maxWriteBufferCount); + maxWriteBufferSize.writeTo(out); out.writeTimeValue(maxRetryDelay); - out.writeTimeValue(pollTimeout); + out.writeTimeValue(readPollTimeout); out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); } @@ -210,22 +250,23 @@ public static ShardFollowTask fromXContent(XContentParser parser) { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - if (leaderCluster != null) { - builder.field(LEADER_CLUSTER_FIELD.getPreferredName(), leaderCluster); - } + 
builder.field(REMOTE_CLUSTER_FIELD.getPreferredName(), remoteCluster); builder.field(FOLLOW_SHARD_INDEX_FIELD.getPreferredName(), followShardId.getIndex().getName()); builder.field(FOLLOW_SHARD_INDEX_UUID_FIELD.getPreferredName(), followShardId.getIndex().getUUID()); builder.field(FOLLOW_SHARD_SHARDID_FIELD.getPreferredName(), followShardId.id()); builder.field(LEADER_SHARD_INDEX_FIELD.getPreferredName(), leaderShardId.getIndex().getName()); builder.field(LEADER_SHARD_INDEX_UUID_FIELD.getPreferredName(), leaderShardId.getIndex().getUUID()); builder.field(LEADER_SHARD_SHARDID_FIELD.getPreferredName(), leaderShardId.id()); - builder.field(MAX_BATCH_OPERATION_COUNT.getPreferredName(), maxBatchOperationCount); - builder.field(MAX_CONCURRENT_READ_BATCHES.getPreferredName(), maxConcurrentReadBatches); - builder.field(MAX_BATCH_SIZE.getPreferredName(), maxBatchSize.getStringRep()); - builder.field(MAX_CONCURRENT_WRITE_BATCHES.getPreferredName(), maxConcurrentWriteBatches); - builder.field(MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize); + builder.field(MAX_READ_REQUEST_OPERATION_COUNT.getPreferredName(), maxReadRequestOperationCount); + builder.field(MAX_READ_REQUEST_SIZE.getPreferredName(), maxReadRequestSize.getStringRep()); + builder.field(MAX_OUTSTANDING_READ_REQUESTS.getPreferredName(), maxOutstandingReadRequests); + builder.field(MAX_WRITE_REQUEST_OPERATION_COUNT.getPreferredName(), maxWriteRequestOperationCount); + builder.field(MAX_WRITE_REQUEST_SIZE.getPreferredName(), maxWriteRequestSize.getStringRep()); + builder.field(MAX_OUTSTANDING_WRITE_REQUESTS.getPreferredName(), maxOutstandingWriteRequests); + builder.field(MAX_WRITE_BUFFER_COUNT.getPreferredName(), maxWriteBufferCount); + builder.field(MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize.getStringRep()); builder.field(MAX_RETRY_DELAY.getPreferredName(), maxRetryDelay.getStringRep()); - builder.field(POLL_TIMEOUT.getPreferredName(), pollTimeout.getStringRep()); + 
builder.field(READ_POLL_TIMEOUT.getPreferredName(), readPollTimeout.getStringRep()); builder.field(HEADERS.getPreferredName(), headers); return builder.endObject(); } @@ -235,32 +276,38 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; ShardFollowTask that = (ShardFollowTask) o; - return Objects.equals(leaderCluster, that.leaderCluster) && + return Objects.equals(remoteCluster, that.remoteCluster) && Objects.equals(followShardId, that.followShardId) && Objects.equals(leaderShardId, that.leaderShardId) && - maxBatchOperationCount == that.maxBatchOperationCount && - maxConcurrentReadBatches == that.maxConcurrentReadBatches && - maxConcurrentWriteBatches == that.maxConcurrentWriteBatches && - maxBatchSize.equals(that.maxBatchSize) && - maxWriteBufferSize == that.maxWriteBufferSize && + maxReadRequestOperationCount == that.maxReadRequestOperationCount && + maxReadRequestSize.equals(that.maxReadRequestSize) && + maxOutstandingReadRequests == that.maxOutstandingReadRequests && + maxWriteRequestOperationCount == that.maxWriteRequestOperationCount && + maxWriteRequestSize.equals(that.maxWriteRequestSize) && + maxOutstandingWriteRequests == that.maxOutstandingWriteRequests && + maxWriteBufferCount == that.maxWriteBufferCount && + maxWriteBufferSize.equals(that.maxWriteBufferSize) && Objects.equals(maxRetryDelay, that.maxRetryDelay) && - Objects.equals(pollTimeout, that.pollTimeout) && + Objects.equals(readPollTimeout, that.readPollTimeout) && Objects.equals(headers, that.headers); } @Override public int hashCode() { return Objects.hash( - leaderCluster, + remoteCluster, followShardId, leaderShardId, - maxBatchOperationCount, - maxConcurrentReadBatches, - maxConcurrentWriteBatches, - maxBatchSize, + maxReadRequestOperationCount, + maxReadRequestSize, + maxOutstandingReadRequests, + maxWriteRequestOperationCount, + maxWriteRequestSize, + maxOutstandingWriteRequests, + maxWriteBufferCount, 
maxWriteBufferSize, maxRetryDelay, - pollTimeout, + readPollTimeout, headers ); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java index 0bb861d795ffa..5a82b45cf8c38 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java @@ -67,15 +67,6 @@ public ShardFollowTasksExecutor(Settings settings, Client client, ThreadPool thr @Override public void validate(ShardFollowTask params, ClusterState clusterState) { - if (params.getLeaderCluster() == null) { - // We can only validate IndexRoutingTable in local cluster, - // for remote cluster we would need to make a remote call and we cannot do this here. - IndexRoutingTable routingTable = clusterState.getRoutingTable().index(params.getLeaderShardId().getIndex()); - if (routingTable.shard(params.getLeaderShardId().id()).primaryShard().started() == false) { - throw new IllegalArgumentException("Not all copies of leader shard are started"); - } - } - IndexRoutingTable routingTable = clusterState.getRoutingTable().index(params.getFollowShardId().getIndex()); if (routingTable.shard(params.getFollowShardId().id()).primaryShard().started() == false) { throw new IllegalArgumentException("Not all copies of follow shard are started"); @@ -88,8 +79,8 @@ protected AllocatedPersistentTask createTask(long id, String type, String action Map headers) { ShardFollowTask params = taskInProgress.getParams(); final Client leaderClient; - if (params.getLeaderCluster() != null) { - leaderClient = wrapClient(client.getRemoteClusterClient(params.getLeaderCluster()), params.getHeaders()); + if (params.getRemoteCluster() != null) { + leaderClient = wrapClient(client.getRemoteClusterClient(params.getRemoteCluster()), params.getHeaders()); } 
else { leaderClient = wrapClient(client, params.getHeaders()); } @@ -162,8 +153,8 @@ protected void innerSendShardChangesRequest(long from, int maxOperationCount, Co new ShardChangesAction.Request(params.getLeaderShardId(), recordedLeaderShardHistoryUUID); request.setFromSeqNo(from); request.setMaxOperationCount(maxOperationCount); - request.setMaxBatchSize(params.getMaxBatchSize()); - request.setPollTimeout(params.getPollTimeout()); + request.setMaxBatchSize(params.getMaxReadRequestSize()); + request.setPollTimeout(params.getReadPollTimeout()); leaderClient.execute(ShardChangesAction.INSTANCE, request, ActionListener.wrap(handler::accept, errorHandler)); } }; diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPauseFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPauseFollowAction.java index 47fd785a0d371..02f483cc843a8 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPauseFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPauseFollowAction.java @@ -7,27 +7,27 @@ package org.elasticsearch.xpack.ccr.action; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.persistent.PersistentTasksService; -import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ccr.action.PauseFollowAction; import java.util.List; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReferenceArray; import java.util.stream.Collectors; -public class TransportPauseFollowAction extends HandledTransportAction { +public class TransportPauseFollowAction extends TransportMasterNodeAction { - private final Client client; private final PersistentTasksService persistentTasksService; @Inject @@ -35,86 +35,60 @@ public TransportPauseFollowAction( final Settings settings, final TransportService transportService, final ActionFilters actionFilters, - final Client client, + final ClusterService clusterService, + final ThreadPool threadPool, + final IndexNameExpressionResolver indexNameExpressionResolver, final PersistentTasksService persistentTasksService) { - super(settings, PauseFollowAction.NAME, transportService, actionFilters, PauseFollowAction.Request::new); - this.client = client; + super(settings, PauseFollowAction.NAME, transportService, clusterService, threadPool, actionFilters, + PauseFollowAction.Request::new, indexNameExpressionResolver); this.persistentTasksService = persistentTasksService; } @Override - protected void doExecute( - final Task task, - final PauseFollowAction.Request request, - final ActionListener listener) { - - client.admin().cluster().state(new ClusterStateRequest(), ActionListener.wrap(r -> { - PersistentTasksCustomMetaData persistentTasksMetaData = r.getState().metaData().custom(PersistentTasksCustomMetaData.TYPE); - if (persistentTasksMetaData == null) { - listener.onFailure(new IllegalArgumentException("no shard follow tasks for [" + 
request.getFollowIndex() + "]")); - return; - } - - List shardFollowTaskIds = persistentTasksMetaData.tasks().stream() - .filter(persistentTask -> ShardFollowTask.NAME.equals(persistentTask.getTaskName())) - .filter(persistentTask -> { - ShardFollowTask shardFollowTask = (ShardFollowTask) persistentTask.getParams(); - return shardFollowTask.getFollowShardId().getIndexName().equals(request.getFollowIndex()); - }) - .map(PersistentTasksCustomMetaData.PersistentTask::getId) - .collect(Collectors.toList()); - - if (shardFollowTaskIds.isEmpty()) { - listener.onFailure(new IllegalArgumentException("no shard follow tasks for [" + request.getFollowIndex() + "]")); - return; - } - - final AtomicInteger counter = new AtomicInteger(shardFollowTaskIds.size()); - final AtomicReferenceArray responses = new AtomicReferenceArray<>(shardFollowTaskIds.size()); - int i = 0; - - for (String taskId : shardFollowTaskIds) { - final int shardId = i++; - persistentTasksService.sendRemoveRequest(taskId, - new ActionListener>() { - @Override - public void onResponse(PersistentTasksCustomMetaData.PersistentTask task) { - responses.set(shardId, task); - finalizeResponse(); - } + protected String executor() { + return ThreadPool.Names.SAME; + } - @Override - public void onFailure(Exception e) { - responses.set(shardId, e); - finalizeResponse(); - } + @Override + protected AcknowledgedResponse newResponse() { + return new AcknowledgedResponse(); + } - void finalizeResponse() { - Exception error = null; - if (counter.decrementAndGet() == 0) { - for (int j = 0; j < responses.length(); j++) { - Object response = responses.get(j); - if (response instanceof Exception) { - if (error == null) { - error = (Exception) response; - } else { - error.addSuppressed((Throwable) response); - } - } - } + @Override + protected void masterOperation(PauseFollowAction.Request request, + ClusterState state, + ActionListener listener) throws Exception { + PersistentTasksCustomMetaData persistentTasksMetaData = 
state.metaData().custom(PersistentTasksCustomMetaData.TYPE); + if (persistentTasksMetaData == null) { + listener.onFailure(new IllegalArgumentException("no shard follow tasks for [" + request.getFollowIndex() + "]")); + return; + } + + List shardFollowTaskIds = persistentTasksMetaData.tasks().stream() + .filter(persistentTask -> ShardFollowTask.NAME.equals(persistentTask.getTaskName())) + .filter(persistentTask -> { + ShardFollowTask shardFollowTask = (ShardFollowTask) persistentTask.getParams(); + return shardFollowTask.getFollowShardId().getIndexName().equals(request.getFollowIndex()); + }) + .map(PersistentTasksCustomMetaData.PersistentTask::getId) + .collect(Collectors.toList()); + + if (shardFollowTaskIds.isEmpty()) { + listener.onFailure(new IllegalArgumentException("no shard follow tasks for [" + request.getFollowIndex() + "]")); + return; + } + + int i = 0; + final ResponseHandler responseHandler = new ResponseHandler(shardFollowTaskIds.size(), listener); + for (String taskId : shardFollowTaskIds) { + final int taskSlot = i++; + persistentTasksService.sendRemoveRequest(taskId, responseHandler.getActionListener(taskSlot)); + } + } - if (error == null) { - // include task ids? 
- listener.onResponse(new AcknowledgedResponse(true)); - } else { - // TODO: cancel all started tasks - listener.onFailure(error); - } - } - } - }); - } - }, listener::onFailure)); + @Override + protected ClusterBlockException checkBlock(PauseFollowAction.Request request, ClusterState state) { + return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_WRITE, request.getFollowIndex()); } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java index 8ac28e23fda5d..8416c414990df 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java @@ -76,7 +76,7 @@ protected void masterOperation(PutAutoFollowPatternAction.Request request, listener.onFailure(LicenseUtils.newComplianceException("ccr")); return; } - final Client leaderClient = client.getRemoteClusterClient(request.getLeaderCluster()); + final Client leaderClient = client.getRemoteClusterClient(request.getRemoteCluster()); final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.clear(); clusterStateRequest.metaData(true); @@ -93,7 +93,7 @@ protected void masterOperation(PutAutoFollowPatternAction.Request request, ActionListener.wrap( clusterStateResponse -> { final ClusterState leaderClusterState = clusterStateResponse.getState(); - clusterService.submitStateUpdateTask("put-auto-follow-pattern-" + request.getLeaderCluster(), + clusterService.submitStateUpdateTask("put-auto-follow-pattern-" + request.getRemoteCluster(), new AckedClusterStateUpdateTask(request, listener) { @Override @@ -157,16 +157,19 @@ static ClusterState innerPut(PutAutoFollowPatternAction.Request request, } AutoFollowPattern autoFollowPattern = new 
AutoFollowPattern( - request.getLeaderCluster(), + request.getRemoteCluster(), request.getLeaderIndexPatterns(), request.getFollowIndexNamePattern(), - request.getMaxBatchOperationCount(), + request.getMaxReadRequestOperationCount(), + request.getMaxReadRequestSize(), request.getMaxConcurrentReadBatches(), - request.getMaxBatchSize(), + request.getMaxWriteRequestOperationCount(), + request.getMaxWriteRequestSize(), request.getMaxConcurrentWriteBatches(), + request.getMaxWriteBufferCount(), request.getMaxWriteBufferSize(), request.getMaxRetryDelay(), - request.getPollTimeout()); + request.getReadPollTimeout()); patterns.put(request.getName(), autoFollowPattern); ClusterState.Builder newState = ClusterState.builder(localState); newState.metaData(MetaData.builder(localState.getMetaData()) diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java index 13d173ed815c5..cdf496cc03287 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java @@ -95,22 +95,22 @@ protected void masterOperation( listener.onFailure(LicenseUtils.newComplianceException("ccr")); return; } - String leaderCluster = request.getFollowRequest().getLeaderCluster(); + String remoteCluster = request.getRemoteCluster(); // Validates whether the leader cluster has been configured properly: - client.getRemoteClusterClient(leaderCluster); + client.getRemoteClusterClient(remoteCluster); - String leaderIndex = request.getFollowRequest().getLeaderIndex(); - createFollowerIndexAndFollowRemoteIndex(request, leaderCluster, leaderIndex, listener); + String leaderIndex = request.getLeaderIndex(); + createFollowerIndexAndFollowRemoteIndex(request, remoteCluster, leaderIndex, listener); } private void 
createFollowerIndexAndFollowRemoteIndex( final PutFollowAction.Request request, - final String leaderCluster, + final String remoteCluster, final String leaderIndex, final ActionListener listener) { ccrLicenseChecker.checkRemoteClusterLicenseAndFetchLeaderIndexMetadataAndHistoryUUIDs( client, - leaderCluster, + remoteCluster, leaderIndex, listener::onFailure, (historyUUID, leaderIndexMetaData) -> createFollowerIndex(leaderIndexMetaData, historyUUID, request, listener)); @@ -122,10 +122,13 @@ private void createFollowerIndex( final PutFollowAction.Request request, final ActionListener listener) { if (leaderIndexMetaData == null) { - listener.onFailure(new IllegalArgumentException("leader index [" + request.getFollowRequest().getLeaderIndex() + - "] does not exist")); + listener.onFailure(new IllegalArgumentException("leader index [" + request.getLeaderIndex() + "] does not exist")); return; } + if (leaderIndexMetaData.getSettings().getAsBoolean(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false) == false) { + listener.onFailure( + new IllegalArgumentException("leader index [" + request.getLeaderIndex() + "] does not have soft deletes enabled")); + } ActionListener handler = ActionListener.wrap( result -> { @@ -160,6 +163,8 @@ public ClusterState execute(final ClusterState currentState) throws Exception { Map metadata = new HashMap<>(); metadata.put(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_SHARD_HISTORY_UUIDS, String.join(",", historyUUIDs)); metadata.put(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_UUID_KEY, leaderIndexMetaData.getIndexUUID()); + metadata.put(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_NAME_KEY, leaderIndexMetaData.getIndex().getName()); + metadata.put(Ccr.CCR_CUSTOM_METADATA_REMOTE_CLUSTER_NAME_KEY, request.getRemoteCluster()); imdBuilder.putCustom(Ccr.CCR_CUSTOM_METADATA_KEY, metadata); // Copy all settings, but overwrite a few settings. 
diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java index 569e2d2cacf11..cdbe3b25f1d6a 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexingSlowLog; import org.elasticsearch.index.SearchSlowLog; @@ -31,7 +32,6 @@ import org.elasticsearch.indices.IndicesRequestCache; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.license.LicenseUtils; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -48,19 +48,20 @@ import java.util.Map; import java.util.Objects; import java.util.Set; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReferenceArray; import java.util.stream.Collectors; public class TransportResumeFollowAction extends HandledTransportAction { - static final ByteSizeValue DEFAULT_MAX_BATCH_SIZE = new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES); + static final ByteSizeValue DEFAULT_MAX_READ_REQUEST_SIZE = new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES); + static final ByteSizeValue DEFAULT_MAX_WRITE_REQUEST_SIZE = new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES); private static final TimeValue DEFAULT_MAX_RETRY_DELAY = new TimeValue(500); - private static final int 
DEFAULT_MAX_CONCURRENT_WRITE_BATCHES = 1; - private static final int DEFAULT_MAX_WRITE_BUFFER_SIZE = 10240; - private static final int DEFAULT_MAX_BATCH_OPERATION_COUNT = 1024; - private static final int DEFAULT_MAX_CONCURRENT_READ_BATCHES = 1; - static final TimeValue DEFAULT_POLL_TIMEOUT = TimeValue.timeValueMinutes(1); + private static final int DEFAULT_MAX_OUTSTANDING_WRITE_REQUESTS = 9; + private static final int DEFAULT_MAX_WRITE_BUFFER_COUNT = Integer.MAX_VALUE; + private static final ByteSizeValue DEFAULT_MAX_WRITE_BUFFER_SIZE = new ByteSizeValue(512, ByteSizeUnit.MB); + private static final int DEFAULT_MAX_READ_REQUEST_OPERATION_COUNT = 5120; + private static final int DEFAULT_MAX_WRITE_REQUEST_OPERATION_COUNT = 5120; + private static final int DEFAULT_MAX_OUTSTANDING_READ_REQUESTS = 12; + static final TimeValue DEFAULT_READ_POLL_TIMEOUT = TimeValue.timeValueMinutes(1); private final Client client; private final ThreadPool threadPool; @@ -97,33 +98,34 @@ protected void doExecute(final Task task, listener.onFailure(LicenseUtils.newComplianceException("ccr")); return; } - final String clusterAlias = request.getLeaderCluster(); - // Validates whether the leader cluster has been configured properly: - client.getRemoteClusterClient(clusterAlias); - - final String leaderIndex = request.getLeaderIndex(); - followRemoteIndex(request, clusterAlias, leaderIndex, listener); - } - private void followRemoteIndex( - final ResumeFollowAction.Request request, - final String clusterAlias, - final String leaderIndex, - final ActionListener listener) { final ClusterState state = clusterService.state(); final IndexMetaData followerIndexMetadata = state.getMetaData().index(request.getFollowerIndex()); + if (followerIndexMetadata == null) { + listener.onFailure(new IndexNotFoundException(request.getFollowerIndex())); + return; + } + + final Map ccrMetadata = followerIndexMetadata.getCustomData(Ccr.CCR_CUSTOM_METADATA_KEY); + if (ccrMetadata == null) { + throw new 
IllegalArgumentException("follow index ["+ request.getFollowerIndex() + "] does not have ccr metadata"); + } + final String leaderCluster = ccrMetadata.get(Ccr.CCR_CUSTOM_METADATA_REMOTE_CLUSTER_NAME_KEY); + // Validates whether the leader cluster has been configured properly: + client.getRemoteClusterClient(leaderCluster); + final String leaderIndex = ccrMetadata.get(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_NAME_KEY); ccrLicenseChecker.checkRemoteClusterLicenseAndFetchLeaderIndexMetadataAndHistoryUUIDs( - client, - clusterAlias, - leaderIndex, - listener::onFailure, - (leaderHistoryUUID, leaderIndexMetadata) -> { - try { - start(request, clusterAlias, leaderIndexMetadata, followerIndexMetadata, leaderHistoryUUID, listener); - } catch (final IOException e) { - listener.onFailure(e); - } - }); + client, + leaderCluster, + leaderIndex, + listener::onFailure, + (leaderHistoryUUID, leaderIndexMetadata) -> { + try { + start(request, leaderCluster, leaderIndexMetadata, followerIndexMetadata, leaderHistoryUUID, listener); + } catch (final IOException e) { + listener.onFailure(e); + } + }); } /** @@ -142,62 +144,22 @@ void start( IndexMetaData leaderIndexMetadata, IndexMetaData followIndexMetadata, String[] leaderIndexHistoryUUIDs, - ActionListener handler) throws IOException { + ActionListener listener) throws IOException { MapperService mapperService = followIndexMetadata != null ? 
indicesService.createIndexMapperService(followIndexMetadata) : null; validate(request, leaderIndexMetadata, followIndexMetadata, leaderIndexHistoryUUIDs, mapperService); final int numShards = followIndexMetadata.getNumberOfShards(); - final AtomicInteger counter = new AtomicInteger(numShards); - final AtomicReferenceArray responses = new AtomicReferenceArray<>(followIndexMetadata.getNumberOfShards()); + final ResponseHandler handler = new ResponseHandler(numShards, listener); Map filteredHeaders = threadPool.getThreadContext().getHeaders().entrySet().stream() .filter(e -> ShardFollowTask.HEADER_FILTERS.contains(e.getKey())) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - for (int i = 0; i < numShards; i++) { - final int shardId = i; + for (int shardId = 0; shardId < numShards; shardId++) { String taskId = followIndexMetadata.getIndexUUID() + "-" + shardId; final ShardFollowTask shardFollowTask = createShardFollowTask(shardId, clusterNameAlias, request, leaderIndexMetadata, followIndexMetadata, filteredHeaders); - persistentTasksService.sendStartRequest(taskId, ShardFollowTask.NAME, shardFollowTask, - new ActionListener>() { - @Override - public void onResponse(PersistentTasksCustomMetaData.PersistentTask task) { - responses.set(shardId, task); - finalizeResponse(); - } - - @Override - public void onFailure(Exception e) { - responses.set(shardId, e); - finalizeResponse(); - } - - void finalizeResponse() { - Exception error = null; - if (counter.decrementAndGet() == 0) { - for (int j = 0; j < responses.length(); j++) { - Object response = responses.get(j); - if (response instanceof Exception) { - if (error == null) { - error = (Exception) response; - } else { - error.addSuppressed((Throwable) response); - } - } - } - - if (error == null) { - // include task ids? 
- handler.onResponse(new AcknowledgedResponse(true)); - } else { - // TODO: cancel all started tasks - handler.onFailure(error); - } - } - } - } - ); + persistentTasksService.sendStartRequest(taskId, ShardFollowTask.NAME, shardFollowTask, handler.getActionListener(shardId)); } } @@ -207,13 +169,6 @@ static void validate( final IndexMetaData followIndex, final String[] leaderIndexHistoryUUID, final MapperService followerMapperService) { - String leaderIndexName = request.getLeaderCluster() + ":" + request.getLeaderIndex(); - if (leaderIndex == null) { - throw new IllegalArgumentException("leader index [" + leaderIndexName + "] does not exist"); - } - if (followIndex == null) { - throw new IllegalArgumentException("follow index [" + request.getFollowerIndex() + "] does not exist"); - } Map ccrIndexMetadata = followIndex.getCustomData(Ccr.CCR_CUSTOM_METADATA_KEY); if (ccrIndexMetadata == null) { throw new IllegalArgumentException("follow index ["+ followIndex.getIndex().getName() + "] does not have ccr metadata"); @@ -238,7 +193,8 @@ static void validate( } if (leaderIndex.getSettings().getAsBoolean(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false) == false) { - throw new IllegalArgumentException("leader index [" + leaderIndexName + "] does not have soft deletes enabled"); + throw new IllegalArgumentException("leader index [" + leaderIndex.getIndex().getName() + + "] does not have soft deletes enabled"); } if (followIndex.getSettings().getAsBoolean(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false) == false) { throw new IllegalArgumentException("follower index [" + request.getFollowerIndex() + "] does not have soft deletes enabled"); @@ -278,35 +234,56 @@ private static ShardFollowTask createShardFollowTask( IndexMetaData followIndexMetadata, Map filteredHeaders ) { - int maxBatchOperationCount; - if (request.getMaxBatchOperationCount() != null) { - maxBatchOperationCount = request.getMaxBatchOperationCount(); + int maxReadRequestOperationCount; + if 
(request.getMaxReadRequestOperationCount() != null) { + maxReadRequestOperationCount = request.getMaxReadRequestOperationCount(); + } else { + maxReadRequestOperationCount = DEFAULT_MAX_READ_REQUEST_OPERATION_COUNT; + } + + ByteSizeValue maxReadRequestSize; + if (request.getMaxReadRequestSize() != null) { + maxReadRequestSize = request.getMaxReadRequestSize(); + } else { + maxReadRequestSize = DEFAULT_MAX_READ_REQUEST_SIZE; + } + + int maxOutstandingReadRequests; + if (request.getMaxOutstandingReadRequests() != null){ + maxOutstandingReadRequests = request.getMaxOutstandingReadRequests(); + } else { + maxOutstandingReadRequests = DEFAULT_MAX_OUTSTANDING_READ_REQUESTS; + } + + final int maxWriteRequestOperationCount; + if (request.getMaxWriteRequestOperationCount() != null) { + maxWriteRequestOperationCount = request.getMaxWriteRequestOperationCount(); } else { - maxBatchOperationCount = DEFAULT_MAX_BATCH_OPERATION_COUNT; + maxWriteRequestOperationCount = DEFAULT_MAX_WRITE_REQUEST_OPERATION_COUNT; } - int maxConcurrentReadBatches; - if (request.getMaxConcurrentReadBatches() != null){ - maxConcurrentReadBatches = request.getMaxConcurrentReadBatches(); + final ByteSizeValue maxWriteRequestSize; + if (request.getMaxWriteRequestSize() != null) { + maxWriteRequestSize = request.getMaxWriteRequestSize(); } else { - maxConcurrentReadBatches = DEFAULT_MAX_CONCURRENT_READ_BATCHES; + maxWriteRequestSize = DEFAULT_MAX_WRITE_REQUEST_SIZE; } - ByteSizeValue maxBatchSize; - if (request.getMaxBatchSize() != null) { - maxBatchSize = request.getMaxBatchSize(); + int maxOutstandingWriteRequests; + if (request.getMaxOutstandingWriteRequests() != null) { + maxOutstandingWriteRequests = request.getMaxOutstandingWriteRequests(); } else { - maxBatchSize = DEFAULT_MAX_BATCH_SIZE; + maxOutstandingWriteRequests = DEFAULT_MAX_OUTSTANDING_WRITE_REQUESTS; } - int maxConcurrentWriteBatches; - if (request.getMaxConcurrentWriteBatches() != null) { - maxConcurrentWriteBatches = 
request.getMaxConcurrentWriteBatches(); + int maxWriteBufferCount; + if (request.getMaxWriteBufferCount() != null) { + maxWriteBufferCount = request.getMaxWriteBufferCount(); } else { - maxConcurrentWriteBatches = DEFAULT_MAX_CONCURRENT_WRITE_BATCHES; + maxWriteBufferCount = DEFAULT_MAX_WRITE_BUFFER_COUNT; } - int maxWriteBufferSize; + ByteSizeValue maxWriteBufferSize; if (request.getMaxWriteBufferSize() != null) { maxWriteBufferSize = request.getMaxWriteBufferSize(); } else { @@ -314,19 +291,22 @@ private static ShardFollowTask createShardFollowTask( } TimeValue maxRetryDelay = request.getMaxRetryDelay() == null ? DEFAULT_MAX_RETRY_DELAY : request.getMaxRetryDelay(); - TimeValue pollTimeout = request.getPollTimeout() == null ? DEFAULT_POLL_TIMEOUT : request.getPollTimeout(); + TimeValue readPollTimeout = request.getReadPollTimeout() == null ? DEFAULT_READ_POLL_TIMEOUT : request.getReadPollTimeout(); return new ShardFollowTask( clusterAliasName, new ShardId(followIndexMetadata.getIndex(), shardId), new ShardId(leaderIndexMetadata.getIndex(), shardId), - maxBatchOperationCount, - maxConcurrentReadBatches, - maxBatchSize, - maxConcurrentWriteBatches, + maxReadRequestOperationCount, + maxReadRequestSize, + maxOutstandingReadRequests, + maxWriteRequestOperationCount, + maxWriteRequestSize, + maxOutstandingWriteRequests, + maxWriteBufferCount, maxWriteBufferSize, maxRetryDelay, - pollTimeout, + readPollTimeout, filteredHeaders ); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPauseFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPauseFollowAction.java index 9d4df8d856716..c7be6382fa7dd 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPauseFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPauseFollowAction.java @@ -31,8 +31,7 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(RestRequest 
restRequest, NodeClient client) throws IOException { - Request request = new Request(); - request.setFollowIndex(restRequest.param("index")); + Request request = new Request(restRequest.param("index")); return channel -> client.execute(INSTANCE, request, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPutFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPutFollowAction.java index d6116ff274b58..7b21422cb9867 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPutFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPutFollowAction.java @@ -7,6 +7,7 @@ import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; @@ -31,7 +32,13 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { - Request request = new Request(RestResumeFollowAction.createRequest(restRequest)); + Request request = createRequest(restRequest); return channel -> client.execute(INSTANCE, request, new RestToXContentListener<>(channel)); } + + static Request createRequest(RestRequest restRequest) throws IOException { + try (XContentParser parser = restRequest.contentOrSourceParamParser()) { + return Request.fromXContent(parser, restRequest.param("index")); + } + } } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java index b4229e1b26842..a4f9d69bfa924 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java +++ 
b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java @@ -46,6 +46,8 @@ import org.elasticsearch.xpack.ccr.LocalStateCcr; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; +import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; +import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -65,6 +67,7 @@ import static org.elasticsearch.discovery.zen.SettingsBasedHostsProvider.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; public abstract class CcrIntegTestCase extends ESTestCase { @@ -103,6 +106,7 @@ public final void startClusters() throws Exception { @After public void afterTest() throws Exception { + ensureEmptyWriteBuffers(); String masterNode = clusterGroup.followerCluster.getMasterName(); ClusterService clusterService = clusterGroup.followerCluster.getInstance(ClusterService.class, masterNode); removeCCRRelatedMetadataFromClusterState(clusterService); @@ -263,6 +267,18 @@ protected final RefreshResponse refresh(Client client, String... 
indices) { return actionGet; } + protected void ensureEmptyWriteBuffers() throws Exception { + assertBusy(() -> { + FollowStatsAction.StatsResponses statsResponses = + leaderClient().execute(FollowStatsAction.INSTANCE, new FollowStatsAction.StatsRequest()).actionGet(); + for (FollowStatsAction.StatsResponse statsResponse : statsResponses.getStatsResponses()) { + ShardFollowNodeTaskStatus status = statsResponse.status(); + assertThat(status.writeBufferOperationCount(), equalTo(0)); + assertThat(status.writeBufferSizeInBytes(), equalTo(0L)); + } + }); + } + static void removeCCRRelatedMetadataFromClusterState(ClusterService clusterService) throws Exception { CountDownLatch latch = new CountDownLatch(1); clusterService.submitStateUpdateTask("remove-ccr-related-metadata", new ClusterStateUpdateTask() { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrSingleNodeTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrSingleNodeTestCase.java index 3f5c340deed4c..439950019a65c 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrSingleNodeTestCase.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrSingleNodeTestCase.java @@ -16,6 +16,9 @@ import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.ccr.LocalStateCcr; import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; +import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction; +import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; import org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction; import org.junit.After; import org.junit.Before; @@ -25,6 +28,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.xpack.CcrIntegTestCase.removeCCRRelatedMetadataFromClusterState; +import static org.hamcrest.Matchers.equalTo; public abstract class CcrSingleNodeTestCase extends 
ESSingleNodeTestCase { @@ -63,14 +67,32 @@ public void remoteLocalRemote() throws Exception { assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); } - protected ResumeFollowAction.Request getFollowRequest() { + protected ResumeFollowAction.Request getResumeFollowRequest() { ResumeFollowAction.Request request = new ResumeFollowAction.Request(); - request.setLeaderCluster("local"); - request.setLeaderIndex("leader"); request.setFollowerIndex("follower"); request.setMaxRetryDelay(TimeValue.timeValueMillis(10)); - request.setPollTimeout(TimeValue.timeValueMillis(10)); + request.setReadPollTimeout(TimeValue.timeValueMillis(10)); return request; } + protected PutFollowAction.Request getPutFollowRequest() { + PutFollowAction.Request request = new PutFollowAction.Request(); + request.setRemoteCluster("local"); + request.setLeaderIndex("leader"); + request.setFollowRequest(getResumeFollowRequest()); + return request; + } + + protected void ensureEmptyWriteBuffers() throws Exception { + assertBusy(() -> { + FollowStatsAction.StatsResponses statsResponses = + client().execute(FollowStatsAction.INSTANCE, new FollowStatsAction.StatsRequest()).actionGet(); + for (FollowStatsAction.StatsResponse statsResponse : statsResponses.getStatsResponses()) { + ShardFollowNodeTaskStatus status = statsResponse.status(); + assertThat(status.writeBufferOperationCount(), equalTo(0)); + assertThat(status.writeBufferSizeInBytes(), equalTo(0L)); + } + }); + } + } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java index 1f2cbc4961b95..305daa34d3010 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java @@ -121,12 +121,12 @@ public void testAutoFollowParameterAreDelegated() throws Exception { // Enabling auto following: 
PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request(); request.setName("my-pattern"); - request.setLeaderCluster("leader_cluster"); + request.setRemoteCluster("leader_cluster"); request.setLeaderIndexPatterns(Collections.singletonList("logs-*")); // Need to set this, because following an index in the same cluster request.setFollowIndexNamePattern("copy-{{leader_index}}"); if (randomBoolean()) { - request.setMaxWriteBufferSize(randomIntBetween(0, Integer.MAX_VALUE)); + request.setMaxWriteBufferCount(randomIntBetween(0, Integer.MAX_VALUE)); } if (randomBoolean()) { request.setMaxConcurrentReadBatches(randomIntBetween(0, Integer.MAX_VALUE)); @@ -135,16 +135,25 @@ public void testAutoFollowParameterAreDelegated() throws Exception { request.setMaxConcurrentWriteBatches(randomIntBetween(0, Integer.MAX_VALUE)); } if (randomBoolean()) { - request.setMaxBatchOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); + request.setMaxReadRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); } if (randomBoolean()) { - request.setMaxBatchSize(new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES)); + request.setMaxWriteBufferSize(new ByteSizeValue(randomNonNegativeLong())); + } + if (randomBoolean()) { + request.setMaxReadRequestSize(new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES)); } if (randomBoolean()) { request.setMaxRetryDelay(TimeValue.timeValueMillis(500)); } if (randomBoolean()) { - request.setPollTimeout(TimeValue.timeValueMillis(500)); + request.setReadPollTimeout(TimeValue.timeValueMillis(500)); + } + if (randomBoolean()) { + request.setMaxWriteRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + request.setMaxWriteBufferSize(new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES)); } assertTrue(followerClient().execute(PutAutoFollowPatternAction.INSTANCE, request).actionGet().isAcknowledged()); @@ -157,26 +166,35 @@ public void 
testAutoFollowParameterAreDelegated() throws Exception { ShardFollowTask shardFollowTask = (ShardFollowTask) persistentTasksMetaData.tasks().iterator().next().getParams(); assertThat(shardFollowTask.getLeaderShardId().getIndexName(), equalTo("logs-201901")); assertThat(shardFollowTask.getFollowShardId().getIndexName(), equalTo("copy-logs-201901")); + if (request.getMaxWriteBufferCount() != null) { + assertThat(shardFollowTask.getMaxWriteBufferCount(), equalTo(request.getMaxWriteBufferCount())); + } if (request.getMaxWriteBufferSize() != null) { assertThat(shardFollowTask.getMaxWriteBufferSize(), equalTo(request.getMaxWriteBufferSize())); } if (request.getMaxConcurrentReadBatches() != null) { - assertThat(shardFollowTask.getMaxConcurrentReadBatches(), equalTo(request.getMaxConcurrentReadBatches())); + assertThat(shardFollowTask.getMaxOutstandingReadRequests(), equalTo(request.getMaxConcurrentReadBatches())); } if (request.getMaxConcurrentWriteBatches() != null) { - assertThat(shardFollowTask.getMaxConcurrentWriteBatches(), equalTo(request.getMaxConcurrentWriteBatches())); + assertThat(shardFollowTask.getMaxOutstandingWriteRequests(), equalTo(request.getMaxConcurrentWriteBatches())); } - if (request.getMaxBatchOperationCount() != null) { - assertThat(shardFollowTask.getMaxBatchOperationCount(), equalTo(request.getMaxBatchOperationCount())); + if (request.getMaxReadRequestOperationCount() != null) { + assertThat(shardFollowTask.getMaxReadRequestOperationCount(), equalTo(request.getMaxReadRequestOperationCount())); } - if (request.getMaxBatchSize() != null) { - assertThat(shardFollowTask.getMaxBatchSize(), equalTo(request.getMaxBatchSize())); + if (request.getMaxReadRequestSize() != null) { + assertThat(shardFollowTask.getMaxReadRequestSize(), equalTo(request.getMaxReadRequestSize())); } if (request.getMaxRetryDelay() != null) { assertThat(shardFollowTask.getMaxRetryDelay(), equalTo(request.getMaxRetryDelay())); } - if (request.getPollTimeout() != null) { - 
assertThat(shardFollowTask.getPollTimeout(), equalTo(request.getPollTimeout())); + if (request.getReadPollTimeout() != null) { + assertThat(shardFollowTask.getReadPollTimeout(), equalTo(request.getReadPollTimeout())); + } + if (request.getMaxWriteRequestOperationCount() != null) { + assertThat(shardFollowTask.getMaxWriteRequestOperationCount(), equalTo(request.getMaxWriteRequestOperationCount())); + } + if (request.getMaxWriteRequestSize() != null) { + assertThat(shardFollowTask.getMaxWriteRequestSize(), equalTo(request.getMaxWriteRequestSize())); } }); } @@ -228,7 +246,7 @@ public void testConflictingPatterns() throws Exception { private void putAutoFollowPatterns(String name, String[] patterns) { PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request(); request.setName(name); - request.setLeaderCluster("leader_cluster"); + request.setRemoteCluster("leader_cluster"); request.setLeaderIndexPatterns(Arrays.asList(patterns)); // Need to set this, because following an index in the same cluster request.setFollowIndexNamePattern("copy-{{leader_index}}"); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowMetadataTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowMetadataTests.java index 67071bd1be5b3..5dab22500a600 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowMetadataTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowMetadataTests.java @@ -45,10 +45,13 @@ protected AutoFollowMetadata createTestInstance() { leaderPatterns, randomAlphaOfLength(4), randomIntBetween(0, Integer.MAX_VALUE), + new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES), + randomIntBetween(0, Integer.MAX_VALUE), randomIntBetween(0, Integer.MAX_VALUE), new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES), randomIntBetween(0, Integer.MAX_VALUE), randomIntBetween(0, Integer.MAX_VALUE), + new 
ByteSizeValue(randomNonNegativeLong()), TimeValue.timeValueMillis(500), TimeValue.timeValueMillis(500)); configs.put(Integer.toString(i), autoFollowPattern); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java index ab14f2dfb8e8b..967c3a7e8c759 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java @@ -49,7 +49,7 @@ protected Settings nodeSettings() { } public void testThatFollowingIndexIsUnavailableWithNonCompliantLicense() throws InterruptedException { - final ResumeFollowAction.Request followRequest = getFollowRequest(); + final ResumeFollowAction.Request followRequest = getResumeFollowRequest(); final CountDownLatch latch = new CountDownLatch(1); client().execute( ResumeFollowAction.INSTANCE, @@ -71,8 +71,7 @@ public void onFailure(final Exception e) { } public void testThatCreateAndFollowingIndexIsUnavailableWithNonCompliantLicense() throws InterruptedException { - final ResumeFollowAction.Request followRequest = getFollowRequest(); - final PutFollowAction.Request createAndFollowRequest = new PutFollowAction.Request(followRequest); + final PutFollowAction.Request createAndFollowRequest = getPutFollowRequest(); final CountDownLatch latch = new CountDownLatch(1); client().execute( PutFollowAction.INSTANCE, @@ -119,7 +118,7 @@ public void testThatPutAutoFollowPatternsIsUnavailableWithNonCompliantLicense() final CountDownLatch latch = new CountDownLatch(1); final PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request(); request.setName("name"); - request.setLeaderCluster("leader"); + request.setRemoteCluster("leader"); request.setLeaderIndexPatterns(Collections.singletonList("*")); client().execute( PutAutoFollowPatternAction.INSTANCE, @@ -149,7 +148,7 @@ public void 
testAutoFollowCoordinatorLogsSkippingAutoFollowCoordinationWithNonCo @Override public ClusterState execute(ClusterState currentState) throws Exception { AutoFollowPattern autoFollowPattern = new AutoFollowPattern("test_alias", Collections.singletonList("logs-*"), - null, null, null, null, null, null, null, null); + null, null, null, null, null, null, null, null, null, null, null); AutoFollowMetadata autoFollowMetadata = new AutoFollowMetadata( Collections.singletonMap("test_alias", autoFollowPattern), Collections.emptyMap(), diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java index 17bb6c8d70d6d..794c64e6bc4ff 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java @@ -82,7 +82,6 @@ import static java.util.Collections.singletonMap; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -100,7 +99,7 @@ public void testFollowIndex() throws Exception { assertAcked(leaderClient().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON)); ensureLeaderYellow("index1"); - final PutFollowAction.Request followRequest = follow("index1", "index2"); + final PutFollowAction.Request followRequest = putFollow("index1", "index2"); followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); final int firstBatchNumDocs = randomIntBetween(2, 64); @@ -162,7 +161,7 @@ public void testSyncMappings() throws Exception { 
assertAcked(leaderClient().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON)); ensureLeaderYellow("index1"); - final PutFollowAction.Request followRequest = follow("index1", "index2"); + final PutFollowAction.Request followRequest = putFollow("index1", "index2"); followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); final long firstBatchNumDocs = randomIntBetween(2, 64); @@ -202,7 +201,7 @@ public void testNoMappingDefined() throws Exception { .build())); ensureLeaderGreen("index1"); - final PutFollowAction.Request followRequest = follow("index1", "index2"); + final PutFollowAction.Request followRequest = putFollow("index1", "index2"); followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); leaderClient().prepareIndex("index1", "doc", "1").setSource("{\"f\":1}", XContentType.JSON).get(); @@ -252,11 +251,11 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure) long numDocsIndexed = Math.min(3000 * 2, randomLongBetween(maxReadSize, maxReadSize * 10)); atLeastDocsIndexed(leaderClient(), "index1", numDocsIndexed / 3); - PutFollowAction.Request followRequest = follow("index1", "index2"); - followRequest.getFollowRequest().setMaxBatchOperationCount(maxReadSize); - followRequest.getFollowRequest().setMaxConcurrentReadBatches(randomIntBetween(2, 10)); - followRequest.getFollowRequest().setMaxConcurrentWriteBatches(randomIntBetween(2, 10)); - followRequest.getFollowRequest().setMaxWriteBufferSize(randomIntBetween(1024, 10240)); + PutFollowAction.Request followRequest = putFollow("index1", "index2"); + followRequest.getFollowRequest().setMaxReadRequestOperationCount(maxReadSize); + followRequest.getFollowRequest().setMaxOutstandingReadRequests(randomIntBetween(2, 10)); + followRequest.getFollowRequest().setMaxOutstandingWriteRequests(randomIntBetween(2, 10)); + followRequest.getFollowRequest().setMaxWriteBufferCount(randomIntBetween(1024, 10240)); 
followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); atLeastDocsIndexed(leaderClient(), "index1", numDocsIndexed); @@ -271,7 +270,6 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure) assertMaxSeqNoOfUpdatesIsTransferred(resolveLeaderIndex("index1"), resolveFollowerIndex("index2"), numberOfShards); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/34696") public void testFollowIndexAndCloseNode() throws Exception { getFollowerCluster().ensureAtLeastNumDataNodes(3); String leaderIndexSettings = getIndexSettings(3, 1, singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); @@ -295,14 +293,17 @@ public void testFollowIndexAndCloseNode() throws Exception { }); thread.start(); - PutFollowAction.Request followRequest = follow("index1", "index2"); - followRequest.getFollowRequest().setMaxBatchOperationCount(randomIntBetween(32, 2048)); - followRequest.getFollowRequest().setMaxConcurrentReadBatches(randomIntBetween(2, 10)); - followRequest.getFollowRequest().setMaxConcurrentWriteBatches(randomIntBetween(2, 10)); + PutFollowAction.Request followRequest = putFollow("index1", "index2"); + followRequest.getFollowRequest().setMaxReadRequestOperationCount(randomIntBetween(32, 2048)); + followRequest.getFollowRequest().setMaxReadRequestSize(new ByteSizeValue(randomIntBetween(1, 4096), ByteSizeUnit.KB)); + followRequest.getFollowRequest().setMaxOutstandingReadRequests(randomIntBetween(1, 10)); + followRequest.getFollowRequest().setMaxWriteRequestOperationCount(randomIntBetween(32, 2048)); + followRequest.getFollowRequest().setMaxWriteRequestSize(new ByteSizeValue(randomIntBetween(1, 4096), ByteSizeUnit.KB)); + followRequest.getFollowRequest().setMaxOutstandingWriteRequests(randomIntBetween(1, 10)); followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); - long maxNumDocsReplicated = Math.min(1000, 
randomLongBetween(followRequest.getFollowRequest().getMaxBatchOperationCount(), - followRequest.getFollowRequest().getMaxBatchOperationCount() * 10)); + long maxNumDocsReplicated = Math.min(1000, randomLongBetween(followRequest.getFollowRequest().getMaxReadRequestOperationCount(), + followRequest.getFollowRequest().getMaxReadRequestOperationCount() * 10)); long minNumDocsReplicated = maxNumDocsReplicated / 3L; logger.info("waiting for at least [{}] documents to be indexed and then stop a random data node", minNumDocsReplicated); atLeastDocsIndexed(followerClient(), "index2", minNumDocsReplicated); @@ -323,7 +324,7 @@ public void testFollowIndexWithNestedField() throws Exception { assertAcked(leaderClient().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON)); ensureLeaderGreen("index1"); - final PutFollowAction.Request followRequest = follow("index1", "index2"); + final PutFollowAction.Request followRequest = putFollow("index1", "index2"); followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); final int numDocs = randomIntBetween(2, 64); @@ -359,8 +360,7 @@ public void testFollowIndexWithNestedField() throws Exception { } public void testUnfollowNonExistingIndex() { - PauseFollowAction.Request unfollowRequest = new PauseFollowAction.Request(); - unfollowRequest.setFollowIndex("non-existing-index"); + PauseFollowAction.Request unfollowRequest = new PauseFollowAction.Request("non-existing-index"); expectThrows(IllegalArgumentException.class, () -> followerClient().execute(PauseFollowAction.INSTANCE, unfollowRequest).actionGet()); } @@ -372,22 +372,17 @@ public void testFollowNonExistentIndex() throws Exception { ensureLeaderGreen("test-leader"); ensureFollowerGreen("test-follower"); // Leader index does not exist. 
- ResumeFollowAction.Request followRequest1 = resumeFollow("non-existent-leader", "test-follower"); - expectThrows(IndexNotFoundException.class, () -> followerClient().execute(ResumeFollowAction.INSTANCE, followRequest1).actionGet()); expectThrows(IndexNotFoundException.class, - () -> followerClient().execute(PutFollowAction.INSTANCE, new PutFollowAction.Request(followRequest1)) + () -> followerClient().execute(PutFollowAction.INSTANCE, putFollow("non-existent-leader", "test-follower")) .actionGet()); // Follower index does not exist. - ResumeFollowAction.Request followRequest2 = resumeFollow("non-test-leader", "non-existent-follower"); - expectThrows(IndexNotFoundException.class, () -> followerClient().execute(ResumeFollowAction.INSTANCE, followRequest2).actionGet()); - expectThrows(IndexNotFoundException.class, - () -> followerClient().execute(PutFollowAction.INSTANCE, new PutFollowAction.Request(followRequest2)) - .actionGet()); + ResumeFollowAction.Request followRequest1 = resumeFollow("non-existent-follower"); + expectThrows(IndexNotFoundException.class, () -> followerClient().execute(ResumeFollowAction.INSTANCE, followRequest1).actionGet()); // Both indices do not exist. 
- ResumeFollowAction.Request followRequest3 = resumeFollow("non-existent-leader", "non-existent-follower"); - expectThrows(IndexNotFoundException.class, () -> followerClient().execute(ResumeFollowAction.INSTANCE, followRequest3).actionGet()); + ResumeFollowAction.Request followRequest2 = resumeFollow("non-existent-follower"); + expectThrows(IndexNotFoundException.class, () -> followerClient().execute(ResumeFollowAction.INSTANCE, followRequest2).actionGet()); expectThrows(IndexNotFoundException.class, - () -> followerClient().execute(PutFollowAction.INSTANCE, new PutFollowAction.Request(followRequest3)) + () -> followerClient().execute(PutFollowAction.INSTANCE, putFollow("non-existing-leader", "non-existing-follower")) .actionGet()); } @@ -404,8 +399,8 @@ public void testFollowIndexMaxOperationSizeInBytes() throws Exception { leaderClient().prepareIndex("index1", "doc", Integer.toString(i)).setSource(source, XContentType.JSON).get(); } - PutFollowAction.Request followRequest = follow("index1", "index2"); - followRequest.getFollowRequest().setMaxBatchSize(new ByteSizeValue(1, ByteSizeUnit.BYTES)); + PutFollowAction.Request followRequest = putFollow("index1", "index2"); + followRequest.getFollowRequest().setMaxReadRequestSize(new ByteSizeValue(1, ByteSizeUnit.BYTES)); followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); final Map firstBatchNumDocsPerShard = new HashMap<>(); @@ -427,37 +422,11 @@ public void testFollowIndexMaxOperationSizeInBytes() throws Exception { assertTotalNumberOfOptimizedIndexing(resolveFollowerIndex("index2"), 1, numDocs); } - public void testDontFollowTheWrongIndex() throws Exception { - String leaderIndexSettings = getIndexSettings(1, 0, - Collections.singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); - assertAcked(leaderClient().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON)); - ensureLeaderGreen("index1"); - 
assertAcked(leaderClient().admin().indices().prepareCreate("index3").setSource(leaderIndexSettings, XContentType.JSON)); - ensureLeaderGreen("index3"); - - PutFollowAction.Request followRequest = follow("index1", "index2"); - followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); - - followRequest = follow("index3", "index4"); - followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); - pauseFollow("index2", "index4"); - - ResumeFollowAction.Request wrongRequest1 = resumeFollow("index1", "index4"); - Exception e = expectThrows(IllegalArgumentException.class, - () -> followerClient().execute(ResumeFollowAction.INSTANCE, wrongRequest1).actionGet()); - assertThat(e.getMessage(), containsString("follow index [index4] should reference")); - - ResumeFollowAction.Request wrongRequest2 = resumeFollow("index3", "index2"); - e = expectThrows(IllegalArgumentException.class, - () -> followerClient().execute(ResumeFollowAction.INSTANCE, wrongRequest2).actionGet()); - assertThat(e.getMessage(), containsString("follow index [index2] should reference")); - } - public void testAttemptToChangeCcrFollowingIndexSetting() throws Exception { String leaderIndexSettings = getIndexSettings(1, 0, singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); assertAcked(leaderClient().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON).get()); ensureLeaderYellow("index1"); - PutFollowAction.Request followRequest = follow("index1", "index2"); + PutFollowAction.Request followRequest = putFollow("index1", "index2"); followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); pauseFollow("index2"); followerClient().admin().indices().close(new CloseIndexRequest("index2")).actionGet(); @@ -478,7 +447,7 @@ public void testCloseLeaderIndex() throws Exception { .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .build())); - final PutFollowAction.Request followRequest = follow("index1", "index2"); + 
final PutFollowAction.Request followRequest = putFollow("index1", "index2"); followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); leaderClient().prepareIndex("index1", "doc", "1").setSource("{}", XContentType.JSON).get(); @@ -490,10 +459,10 @@ public void testCloseLeaderIndex() throws Exception { assertThat(response.getNodeFailures(), empty()); assertThat(response.getTaskFailures(), empty()); assertThat(response.getStatsResponses(), hasSize(1)); - assertThat(response.getStatsResponses().get(0).status().numberOfFailedFetches(), greaterThanOrEqualTo(1L)); - assertThat(response.getStatsResponses().get(0).status().fetchExceptions().size(), equalTo(1)); + assertThat(response.getStatsResponses().get(0).status().failedReadRequests(), greaterThanOrEqualTo(1L)); + assertThat(response.getStatsResponses().get(0).status().readExceptions().size(), equalTo(1)); ElasticsearchException exception = response.getStatsResponses().get(0).status() - .fetchExceptions().entrySet().iterator().next().getValue().v2(); + .readExceptions().entrySet().iterator().next().getValue().v2(); assertThat(exception.getRootCause().getMessage(), equalTo("blocked by: [FORBIDDEN/4/index closed];")); }); @@ -512,7 +481,7 @@ public void testCloseFollowIndex() throws Exception { .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .build())); - final PutFollowAction.Request followRequest = follow("index1", "index2"); + final PutFollowAction.Request followRequest = putFollow("index1", "index2"); followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); leaderClient().prepareIndex("index1", "doc", "1").setSource("{}", XContentType.JSON).get(); @@ -525,7 +494,7 @@ public void testCloseFollowIndex() throws Exception { assertThat(response.getNodeFailures(), empty()); assertThat(response.getTaskFailures(), empty()); assertThat(response.getStatsResponses(), hasSize(1)); - assertThat(response.getStatsResponses().get(0).status().numberOfFailedBulkOperations(), greaterThanOrEqualTo(1L)); 
+ assertThat(response.getStatsResponses().get(0).status().failedWriteRequests(), greaterThanOrEqualTo(1L)); }); followerClient().admin().indices().open(new OpenIndexRequest("index2")).actionGet(); assertBusy(() -> assertThat(followerClient().prepareSearch("index2").get().getHits().totalHits, equalTo(2L))); @@ -541,7 +510,7 @@ public void testDeleteLeaderIndex() throws Exception { .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .build())); - final PutFollowAction.Request followRequest = follow("index1", "index2"); + final PutFollowAction.Request followRequest = putFollow("index1", "index2"); followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); leaderClient().prepareIndex("index1", "doc", "1").setSource("{}", XContentType.JSON).get(); @@ -553,10 +522,10 @@ public void testDeleteLeaderIndex() throws Exception { assertThat(response.getNodeFailures(), empty()); assertThat(response.getTaskFailures(), empty()); assertThat(response.getStatsResponses(), hasSize(1)); - assertThat(response.getStatsResponses().get(0).status().numberOfFailedFetches(), greaterThanOrEqualTo(1L)); + assertThat(response.getStatsResponses().get(0).status().failedReadRequests(), greaterThanOrEqualTo(1L)); ElasticsearchException fatalException = response.getStatsResponses().get(0).status().getFatalException(); assertThat(fatalException, notNullValue()); - assertThat(fatalException.getRootCause().getMessage(), equalTo("no such index")); + assertThat(fatalException.getRootCause().getMessage(), equalTo("no such index [index1]")); }); pauseFollow("index2"); ensureNoCcrTasks(); @@ -570,7 +539,7 @@ public void testDeleteFollowerIndex() throws Exception { .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .build())); - final PutFollowAction.Request followRequest = follow("index1", "index2"); + final PutFollowAction.Request followRequest = putFollow("index1", "index2"); followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); leaderClient().prepareIndex("index1", "doc", 
"1").setSource("{}", XContentType.JSON).get(); @@ -583,10 +552,10 @@ public void testDeleteFollowerIndex() throws Exception { assertThat(response.getNodeFailures(), empty()); assertThat(response.getTaskFailures(), empty()); assertThat(response.getStatsResponses(), hasSize(1)); - assertThat(response.getStatsResponses().get(0).status().numberOfFailedBulkOperations(), greaterThanOrEqualTo(1L)); + assertThat(response.getStatsResponses().get(0).status().failedWriteRequests(), greaterThanOrEqualTo(1L)); ElasticsearchException fatalException = response.getStatsResponses().get(0).status().getFatalException(); assertThat(fatalException, notNullValue()); - assertThat(fatalException.getMessage(), equalTo("no such index")); + assertThat(fatalException.getMessage(), equalTo("no such index [index2]")); }); pauseFollow("index2"); ensureNoCcrTasks(); @@ -595,7 +564,7 @@ public void testDeleteFollowerIndex() throws Exception { public void testUnfollowIndex() throws Exception { String leaderIndexSettings = getIndexSettings(1, 0, singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); assertAcked(leaderClient().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON).get()); - PutFollowAction.Request followRequest = follow("index1", "index2"); + PutFollowAction.Request followRequest = putFollow("index1", "index2"); followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); leaderClient().prepareIndex("index1", "doc").setSource("{}", XContentType.JSON).get(); assertBusy(() -> { @@ -619,7 +588,6 @@ public void testUnfollowIndex() throws Exception { assertThat(followerClient().prepareSearch("index2").get().getHits().getTotalHits(), equalTo(2L)); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/34696") public void testFailOverOnFollower() throws Exception { int numberOfReplicas = between(1, 2); getFollowerCluster().startMasterOnlyNode(); @@ -647,7 +615,13 @@ public void testFailOverOnFollower() 
throws Exception { }); threads[i].start(); } - PutFollowAction.Request follow = follow("leader-index", "follower-index"); + PutFollowAction.Request follow = putFollow("leader-index", "follower-index"); + follow.getFollowRequest().setMaxReadRequestOperationCount(randomIntBetween(32, 2048)); + follow.getFollowRequest().setMaxReadRequestSize(new ByteSizeValue(randomIntBetween(1, 4096), ByteSizeUnit.KB)); + follow.getFollowRequest().setMaxOutstandingReadRequests(randomIntBetween(1, 10)); + follow.getFollowRequest().setMaxWriteRequestOperationCount(randomIntBetween(32, 2048)); + follow.getFollowRequest().setMaxWriteRequestSize(new ByteSizeValue(randomIntBetween(1, 4096), ByteSizeUnit.KB)); + follow.getFollowRequest().setMaxOutstandingWriteRequests(randomIntBetween(1, 10)); followerClient().execute(PutFollowAction.INSTANCE, follow).get(); ensureFollowerGreen("follower-index"); atLeastDocsIndexed(followerClient(), "follower-index", between(20, 60)); @@ -674,17 +648,14 @@ public void testUnknownClusterAlias() throws Exception { Collections.singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); assertAcked(leaderClient().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON)); ensureLeaderGreen("index1"); - PutFollowAction.Request followRequest = follow("index1", "index2"); - followRequest.getFollowRequest().setLeaderCluster("another_cluster"); + PutFollowAction.Request followRequest = putFollow("index1", "index2"); + followRequest.setRemoteCluster("another_cluster"); Exception e = expectThrows(IllegalArgumentException.class, () -> followerClient().execute(PutFollowAction.INSTANCE, followRequest).actionGet()); assertThat(e.getMessage(), equalTo("unknown cluster alias [another_cluster]")); - e = expectThrows(IllegalArgumentException.class, - () -> followerClient().execute(ResumeFollowAction.INSTANCE, followRequest.getFollowRequest()).actionGet()); - assertThat(e.getMessage(), equalTo("unknown cluster alias 
[another_cluster]")); PutAutoFollowPatternAction.Request putAutoFollowRequest = new PutAutoFollowPatternAction.Request(); putAutoFollowRequest.setName("name"); - putAutoFollowRequest.setLeaderCluster("another_cluster"); + putAutoFollowRequest.setRemoteCluster("another_cluster"); putAutoFollowRequest.setLeaderIndexPatterns(Collections.singletonList("logs-*")); e = expectThrows(IllegalArgumentException.class, () -> followerClient().execute(PutAutoFollowPatternAction.INSTANCE, putAutoFollowRequest).actionGet()); @@ -696,7 +667,7 @@ public void testAddNewReplicasOnFollower() throws Exception { String leaderIndexSettings = getIndexSettings(1, numberOfReplicas, singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); assertAcked(leaderClient().admin().indices().prepareCreate("leader-index").setSource(leaderIndexSettings, XContentType.JSON)); - PutFollowAction.Request follow = follow("leader-index", "follower-index"); + PutFollowAction.Request follow = putFollow("leader-index", "follower-index"); followerClient().execute(PutFollowAction.INSTANCE, follow).get(); getFollowerCluster().ensureAtLeastNumDataNodes(numberOfReplicas + between(2, 3)); ensureFollowerGreen("follower-index"); @@ -787,8 +758,7 @@ private CheckedRunnable assertTask(final int numberOfPrimaryShards, f private void pauseFollow(String... 
indices) throws Exception { for (String index : indices) { - final PauseFollowAction.Request unfollowRequest = new PauseFollowAction.Request(); - unfollowRequest.setFollowIndex(index); + final PauseFollowAction.Request unfollowRequest = new PauseFollowAction.Request(index); followerClient().execute(PauseFollowAction.INSTANCE, unfollowRequest).get(); } ensureNoCcrTasks(); @@ -998,17 +968,19 @@ private void assertTotalNumberOfOptimizedIndexing(Index followerIndex, int numbe }); } - public static PutFollowAction.Request follow(String leaderIndex, String followerIndex) { - return new PutFollowAction.Request(resumeFollow(leaderIndex, followerIndex)); + public static PutFollowAction.Request putFollow(String leaderIndex, String followerIndex) { + PutFollowAction.Request request = new PutFollowAction.Request(); + request.setRemoteCluster("leader_cluster"); + request.setLeaderIndex(leaderIndex); + request.setFollowRequest(resumeFollow(followerIndex)); + return request; } - public static ResumeFollowAction.Request resumeFollow(String leaderIndex, String followerIndex) { + public static ResumeFollowAction.Request resumeFollow(String followerIndex) { ResumeFollowAction.Request request = new ResumeFollowAction.Request(); - request.setLeaderCluster("leader_cluster"); - request.setLeaderIndex(leaderIndex); request.setFollowerIndex(followerIndex); request.setMaxRetryDelay(TimeValue.timeValueMillis(10)); - request.setPollTimeout(TimeValue.timeValueMillis(10)); + request.setReadPollTimeout(TimeValue.timeValueMillis(10)); return request; } } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/LocalIndexFollowingIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/LocalIndexFollowingIT.java index 5ff1c67f32396..363897293a986 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/LocalIndexFollowingIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/LocalIndexFollowingIT.java @@ -31,7 +31,7 @@ public void 
testFollowIndex() throws Exception { assertAcked(client().admin().indices().prepareCreate("leader").setSource(leaderIndexSettings, XContentType.JSON)); ensureGreen("leader"); - final PutFollowAction.Request followRequest = new PutFollowAction.Request(getFollowRequest()); + final PutFollowAction.Request followRequest = getPutFollowRequest(); client().execute(PutFollowAction.INSTANCE, followRequest).get(); final long firstBatchNumDocs = randomIntBetween(2, 64); @@ -52,8 +52,7 @@ public void testFollowIndex() throws Exception { assertThat(client().prepareSearch("follower").get().getHits().totalHits, equalTo(firstBatchNumDocs + secondBatchNumDocs)); }); - PauseFollowAction.Request pauseRequest = new PauseFollowAction.Request(); - pauseRequest.setFollowIndex("follower"); + PauseFollowAction.Request pauseRequest = new PauseFollowAction.Request("follower"); client().execute(PauseFollowAction.INSTANCE, pauseRequest); final long thirdBatchNumDocs = randomIntBetween(2, 64); @@ -61,11 +60,27 @@ public void testFollowIndex() throws Exception { client().prepareIndex("leader", "doc").setSource("{}", XContentType.JSON).get(); } - client().execute(ResumeFollowAction.INSTANCE, getFollowRequest()).get(); + client().execute(ResumeFollowAction.INSTANCE, getResumeFollowRequest()).get(); assertBusy(() -> { assertThat(client().prepareSearch("follower").get().getHits().totalHits, equalTo(firstBatchNumDocs + secondBatchNumDocs + thirdBatchNumDocs)); }); + ensureEmptyWriteBuffers(); + } + + public void testDoNotCreateFollowerIfLeaderDoesNotHaveSoftDeletes() throws Exception { + final String leaderIndexSettings = getIndexSettings(2, 0, + singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "false")); + assertAcked(client().admin().indices().prepareCreate("leader-index").setSource(leaderIndexSettings, XContentType.JSON)); + ResumeFollowAction.Request followRequest = getResumeFollowRequest(); + followRequest.setFollowerIndex("follower-index"); + PutFollowAction.Request 
putFollowRequest = getPutFollowRequest(); + putFollowRequest.setLeaderIndex("leader-index"); + putFollowRequest.setFollowRequest(followRequest); + IllegalArgumentException error = expectThrows(IllegalArgumentException.class, + () -> client().execute(PutFollowAction.INSTANCE, putFollowRequest).actionGet()); + assertThat(error.getMessage(), equalTo("leader index [leader-index] does not have soft deletes enabled")); + assertThat(client().admin().indices().prepareExists("follower-index").get().isExists(), equalTo(false)); } private String getIndexSettings(final int numberOfShards, final int numberOfReplicas, diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java index 3f4c70f0165f5..6b542d15044e5 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java @@ -15,13 +15,14 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.ccr.CcrLicenseChecker; import org.elasticsearch.xpack.ccr.action.AutoFollowCoordinator.AutoFollower; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata.AutoFollowPattern; import org.elasticsearch.xpack.core.ccr.AutoFollowStats; -import org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction; +import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; import java.util.ArrayList; import java.util.Arrays; @@ -50,13 +51,13 @@ public void testAutoFollower() { ClusterState leaderState = ClusterState.builder(new ClusterName("remote")) 
.metaData(MetaData.builder().put(IndexMetaData.builder("logs-20190101") - .settings(settings(Version.CURRENT)) + .settings(settings(Version.CURRENT).put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)) .numberOfShards(1) .numberOfReplicas(0))) .build(); - AutoFollowPattern autoFollowPattern = - new AutoFollowPattern("remote", Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null); + AutoFollowPattern autoFollowPattern = new AutoFollowPattern("remote", Collections.singletonList("logs-*"), + null, null, null, null, null, null, null, null, null, null, null); Map patterns = new HashMap<>(); patterns.put("remote", autoFollowPattern); Map> followedLeaderIndexUUIDS = new HashMap<>(); @@ -83,7 +84,7 @@ public void testAutoFollower() { AutoFollower autoFollower = new AutoFollower(handler, currentState) { @Override void getLeaderClusterState(Map headers, - String leaderClusterAlias, + String remoteCluster, BiConsumer handler) { assertThat(headers, equalTo(autoFollowHeaders.get("remote"))); handler.accept(leaderState, null); @@ -91,13 +92,13 @@ void getLeaderClusterState(Map headers, @Override void createAndFollow(Map headers, - ResumeFollowAction.Request followRequest, + PutFollowAction.Request followRequest, Runnable successHandler, Consumer failureHandler) { assertThat(headers, equalTo(autoFollowHeaders.get("remote"))); - assertThat(followRequest.getLeaderCluster(), equalTo("remote")); + assertThat(followRequest.getRemoteCluster(), equalTo("remote")); assertThat(followRequest.getLeaderIndex(), equalTo("logs-20190101")); - assertThat(followRequest.getFollowerIndex(), equalTo("logs-20190101")); + assertThat(followRequest.getFollowRequest().getFollowerIndex(), equalTo("logs-20190101")); successHandler.run(); } @@ -119,8 +120,8 @@ public void testAutoFollowerClusterStateApiFailure() { Client client = mock(Client.class); when(client.getRemoteClusterClient(anyString())).thenReturn(client); - AutoFollowPattern autoFollowPattern = - new 
AutoFollowPattern("remote", Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null); + AutoFollowPattern autoFollowPattern = new AutoFollowPattern("remote", Collections.singletonList("logs-*"), + null, null, null, null, null, null, null, null, null, null, null); Map patterns = new HashMap<>(); patterns.put("remote", autoFollowPattern); Map> followedLeaderIndexUUIDS = new HashMap<>(); @@ -143,14 +144,14 @@ public void testAutoFollowerClusterStateApiFailure() { AutoFollower autoFollower = new AutoFollower(handler, followerState) { @Override void getLeaderClusterState(Map headers, - String leaderClusterAlias, + String remoteCluster, BiConsumer handler) { handler.accept(null, failure); } @Override void createAndFollow(Map headers, - ResumeFollowAction.Request followRequest, + PutFollowAction.Request followRequest, Runnable successHandler, Consumer failureHandler) { fail("should not get here"); @@ -172,13 +173,13 @@ public void testAutoFollowerUpdateClusterStateFailure() { ClusterState leaderState = ClusterState.builder(new ClusterName("remote")) .metaData(MetaData.builder().put(IndexMetaData.builder("logs-20190101") - .settings(settings(Version.CURRENT)) + .settings(settings(Version.CURRENT).put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)) .numberOfShards(1) .numberOfReplicas(0))) .build(); - AutoFollowPattern autoFollowPattern = - new AutoFollowPattern("remote", Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null); + AutoFollowPattern autoFollowPattern = new AutoFollowPattern("remote", Collections.singletonList("logs-*"), + null, null, null, null, null, null, null, null, null, null, null); Map patterns = new HashMap<>(); patterns.put("remote", autoFollowPattern); Map> followedLeaderIndexUUIDS = new HashMap<>(); @@ -204,19 +205,19 @@ public void testAutoFollowerUpdateClusterStateFailure() { AutoFollower autoFollower = new AutoFollower(handler, followerState) { @Override void 
getLeaderClusterState(Map headers, - String leaderClusterAlias, + String remoteCluster, BiConsumer handler) { handler.accept(leaderState, null); } @Override void createAndFollow(Map headers, - ResumeFollowAction.Request followRequest, + PutFollowAction.Request followRequest, Runnable successHandler, Consumer failureHandler) { - assertThat(followRequest.getLeaderCluster(), equalTo("remote")); + assertThat(followRequest.getRemoteCluster(), equalTo("remote")); assertThat(followRequest.getLeaderIndex(), equalTo("logs-20190101")); - assertThat(followRequest.getFollowerIndex(), equalTo("logs-20190101")); + assertThat(followRequest.getFollowRequest().getFollowerIndex(), equalTo("logs-20190101")); successHandler.run(); } @@ -235,13 +236,13 @@ public void testAutoFollowerCreateAndFollowApiCallFailure() { ClusterState leaderState = ClusterState.builder(new ClusterName("remote")) .metaData(MetaData.builder().put(IndexMetaData.builder("logs-20190101") - .settings(settings(Version.CURRENT)) + .settings(settings(Version.CURRENT).put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)) .numberOfShards(1) .numberOfReplicas(0))) .build(); - AutoFollowPattern autoFollowPattern = - new AutoFollowPattern("remote", Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null); + AutoFollowPattern autoFollowPattern = new AutoFollowPattern("remote", Collections.singletonList("logs-*"), + null, null, null, null, null, null, null, null, null, null, null); Map patterns = new HashMap<>(); patterns.put("remote", autoFollowPattern); Map> followedLeaderIndexUUIDS = new HashMap<>(); @@ -267,19 +268,19 @@ public void testAutoFollowerCreateAndFollowApiCallFailure() { AutoFollower autoFollower = new AutoFollower(handler, followerState) { @Override void getLeaderClusterState(Map headers, - String leaderClusterAlias, + String remoteCluster, BiConsumer handler) { handler.accept(leaderState, null); } @Override void createAndFollow(Map headers, - ResumeFollowAction.Request 
followRequest, + PutFollowAction.Request followRequest, Runnable successHandler, Consumer failureHandler) { - assertThat(followRequest.getLeaderCluster(), equalTo("remote")); + assertThat(followRequest.getRemoteCluster(), equalTo("remote")); assertThat(followRequest.getLeaderIndex(), equalTo("logs-20190101")); - assertThat(followRequest.getFollowerIndex(), equalTo("logs-20190101")); + assertThat(followRequest.getFollowRequest().getFollowerIndex(), equalTo("logs-20190101")); failureHandler.accept(failure); } @@ -294,8 +295,8 @@ void updateAutoFollowMetadata(Function updateFunctio } public void testGetLeaderIndicesToFollow() { - AutoFollowPattern autoFollowPattern = - new AutoFollowPattern("remote", Collections.singletonList("metrics-*"), null, null, null, null, null, null, null, null); + AutoFollowPattern autoFollowPattern = new AutoFollowPattern("remote", Collections.singletonList("metrics-*"), + null, null, null, null, null, null, null, null, null, null, null); Map> headers = new HashMap<>(); ClusterState followerState = ClusterState.builder(new ClusterName("remote")) .metaData(MetaData.builder().putCustom(AutoFollowMetadata.TYPE, @@ -306,7 +307,8 @@ public void testGetLeaderIndicesToFollow() { for (int i = 0; i < 5; i++) { Settings.Builder builder = Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexMetaData.SETTING_INDEX_UUID, "metrics-" + i); + .put(IndexMetaData.SETTING_INDEX_UUID, "metrics-" + i) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), i % 2 == 0); imdBuilder.put(IndexMetaData.builder("metrics-" + i) .settings(builder) .numberOfShards(1) @@ -324,34 +326,30 @@ public void testGetLeaderIndicesToFollow() { List result = AutoFollower.getLeaderIndicesToFollow("remote", autoFollowPattern, leaderState, followerState, Collections.emptyList()); result.sort(Comparator.comparing(Index::getName)); - assertThat(result.size(), equalTo(5)); + assertThat(result.size(), equalTo(3)); assertThat(result.get(0).getName(), 
equalTo("metrics-0")); - assertThat(result.get(1).getName(), equalTo("metrics-1")); - assertThat(result.get(2).getName(), equalTo("metrics-2")); - assertThat(result.get(3).getName(), equalTo("metrics-3")); - assertThat(result.get(4).getName(), equalTo("metrics-4")); + assertThat(result.get(1).getName(), equalTo("metrics-2")); + assertThat(result.get(2).getName(), equalTo("metrics-4")); List followedIndexUUIDs = Collections.singletonList(leaderState.metaData().index("metrics-2").getIndexUUID()); result = AutoFollower.getLeaderIndicesToFollow("remote", autoFollowPattern, leaderState, followerState, followedIndexUUIDs); result.sort(Comparator.comparing(Index::getName)); - assertThat(result.size(), equalTo(4)); + assertThat(result.size(), equalTo(2)); assertThat(result.get(0).getName(), equalTo("metrics-0")); - assertThat(result.get(1).getName(), equalTo("metrics-1")); - assertThat(result.get(2).getName(), equalTo("metrics-3")); - assertThat(result.get(3).getName(), equalTo("metrics-4")); + assertThat(result.get(1).getName(), equalTo("metrics-4")); } public void testGetFollowerIndexName() { AutoFollowPattern autoFollowPattern = new AutoFollowPattern("remote", Collections.singletonList("metrics-*"), null, null, - null, null, null, null, null, null); + null, null, null, null, null, null, null, null, null); assertThat(AutoFollower.getFollowerIndexName(autoFollowPattern, "metrics-0"), equalTo("metrics-0")); autoFollowPattern = new AutoFollowPattern("remote", Collections.singletonList("metrics-*"), "eu-metrics-0", null, null, - null, null, null, null, null); + null, null, null, null, null, null, null, null); assertThat(AutoFollower.getFollowerIndexName(autoFollowPattern, "metrics-0"), equalTo("eu-metrics-0")); autoFollowPattern = new AutoFollowPattern("remote", Collections.singletonList("metrics-*"), "eu-{{leader_index}}", null, - null, null, null, null, null, null); + null, null, null, null, null, null, null, null, null); 
assertThat(AutoFollower.getFollowerIndexName(autoFollowPattern, "metrics-0"), equalTo("eu-metrics-0")); } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/GetAutoFollowPatternResponseTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/GetAutoFollowPatternResponseTests.java index e67509f7ee825..c74afd6075c95 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/GetAutoFollowPatternResponseTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/GetAutoFollowPatternResponseTests.java @@ -33,10 +33,13 @@ protected GetAutoFollowPatternAction.Response createTestInstance() { Collections.singletonList(randomAlphaOfLength(4)), randomAlphaOfLength(4), randomIntBetween(0, Integer.MAX_VALUE), + new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES), + randomIntBetween(0, Integer.MAX_VALUE), randomIntBetween(0, Integer.MAX_VALUE), new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES), randomIntBetween(0, Integer.MAX_VALUE), randomIntBetween(0, Integer.MAX_VALUE), + new ByteSizeValue(randomNonNegativeLong()), TimeValue.timeValueMillis(500), TimeValue.timeValueMillis(500)); patterns.put(randomAlphaOfLength(4), autoFollowPattern); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutAutoFollowPatternRequestTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutAutoFollowPatternRequestTests.java index 2cefc163ee909..3814e561b42c4 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutAutoFollowPatternRequestTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutAutoFollowPatternRequestTests.java @@ -42,19 +42,19 @@ protected PutAutoFollowPatternAction.Request createBlankInstance() { protected PutAutoFollowPatternAction.Request createTestInstance() { PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request(); 
request.setName(randomAlphaOfLength(4)); - request.setLeaderCluster(randomAlphaOfLength(4)); + request.setRemoteCluster(randomAlphaOfLength(4)); request.setLeaderIndexPatterns(Arrays.asList(generateRandomStringArray(4, 4, false))); if (randomBoolean()) { request.setFollowIndexNamePattern(randomAlphaOfLength(4)); } if (randomBoolean()) { - request.setPollTimeout(TimeValue.timeValueMillis(500)); + request.setReadPollTimeout(TimeValue.timeValueMillis(500)); } if (randomBoolean()) { request.setMaxRetryDelay(TimeValue.timeValueMillis(500)); } if (randomBoolean()) { - request.setMaxBatchOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); + request.setMaxReadRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); } if (randomBoolean()) { request.setMaxConcurrentReadBatches(randomIntBetween(0, Integer.MAX_VALUE)); @@ -63,10 +63,13 @@ protected PutAutoFollowPatternAction.Request createTestInstance() { request.setMaxConcurrentWriteBatches(randomIntBetween(0, Integer.MAX_VALUE)); } if (randomBoolean()) { - request.setMaxBatchSize(new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES)); + request.setMaxReadRequestSize(new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES)); } if (randomBoolean()) { - request.setMaxWriteBufferSize(randomIntBetween(0, Integer.MAX_VALUE)); + request.setMaxWriteBufferCount(randomIntBetween(0, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + request.setMaxWriteBufferSize(new ByteSizeValue(randomNonNegativeLong())); } return request; } @@ -80,9 +83,9 @@ public void testValidate() { request.setName("name"); validationException = request.validate(); assertThat(validationException, notNullValue()); - assertThat(validationException.getMessage(), containsString("[leader_cluster] is missing")); + assertThat(validationException.getMessage(), containsString("[remote_cluster] is missing")); - request.setLeaderCluster("_alias"); + request.setRemoteCluster("_alias"); validationException = request.validate(); 
assertThat(validationException, notNullValue()); assertThat(validationException.getMessage(), containsString("[leader_index_patterns] is missing")); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutFollowActionRequestTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutFollowActionRequestTests.java index f86594b3b693a..1385b383b940c 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutFollowActionRequestTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutFollowActionRequestTests.java @@ -5,10 +5,13 @@ */ package org.elasticsearch.xpack.ccr.action; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; -public class PutFollowActionRequestTests extends AbstractStreamableTestCase { +import java.io.IOException; + +public class PutFollowActionRequestTests extends AbstractStreamableXContentTestCase { @Override protected PutFollowAction.Request createBlankInstance() { @@ -17,6 +20,20 @@ protected PutFollowAction.Request createBlankInstance() { @Override protected PutFollowAction.Request createTestInstance() { - return new PutFollowAction.Request(ResumeFollowActionRequestTests.createTestRequest()); + PutFollowAction.Request request = new PutFollowAction.Request(); + request.setRemoteCluster(randomAlphaOfLength(4)); + request.setLeaderIndex(randomAlphaOfLength(4)); + request.setFollowRequest(ResumeFollowActionRequestTests.createTestRequest()); + return request; + } + + @Override + protected PutFollowAction.Request doParseInstance(XContentParser parser) throws IOException { + return PutFollowAction.Request.fromXContent(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return false; } } diff --git 
a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ResumeFollowActionRequestTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ResumeFollowActionRequestTests.java index 8101a6db2b7e3..ae9bc1bbd3339 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ResumeFollowActionRequestTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ResumeFollowActionRequestTests.java @@ -43,37 +43,36 @@ protected boolean supportsUnknownFields() { static ResumeFollowAction.Request createTestRequest() { ResumeFollowAction.Request request = new ResumeFollowAction.Request(); - request.setLeaderCluster(randomAlphaOfLength(4)); - request.setLeaderIndex(randomAlphaOfLength(4)); request.setFollowerIndex(randomAlphaOfLength(4)); if (randomBoolean()) { - request.setMaxBatchOperationCount(randomIntBetween(1, Integer.MAX_VALUE)); + request.setMaxReadRequestOperationCount(randomIntBetween(1, Integer.MAX_VALUE)); } if (randomBoolean()) { - request.setMaxConcurrentReadBatches(randomIntBetween(1, Integer.MAX_VALUE)); + request.setMaxOutstandingReadRequests(randomIntBetween(1, Integer.MAX_VALUE)); } if (randomBoolean()) { - request.setMaxConcurrentWriteBatches(randomIntBetween(1, Integer.MAX_VALUE)); + request.setMaxOutstandingWriteRequests(randomIntBetween(1, Integer.MAX_VALUE)); } if (randomBoolean()) { - request.setMaxBatchSize(new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES)); + request.setMaxReadRequestSize(new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES)); } if (randomBoolean()) { - request.setMaxWriteBufferSize(randomIntBetween(1, Integer.MAX_VALUE)); + request.setMaxWriteBufferCount(randomIntBetween(1, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + request.setMaxWriteBufferSize(new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES)); } if (randomBoolean()) { request.setMaxRetryDelay(TimeValue.timeValueMillis(500)); } if (randomBoolean()) { - 
request.setPollTimeout(TimeValue.timeValueMillis(500)); + request.setReadPollTimeout(TimeValue.timeValueMillis(500)); } return request; } public void testValidate() { ResumeFollowAction.Request request = new ResumeFollowAction.Request(); - request.setLeaderCluster("leader_cluster"); - request.setLeaderIndex("index1"); request.setFollowerIndex("index2"); request.setMaxRetryDelay(TimeValue.ZERO); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java index 50c0dd9ca49a0..4df2bb498b69a 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java @@ -7,6 +7,8 @@ import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.seqno.LocalCheckpointTracker; import org.elasticsearch.index.shard.ShardId; @@ -58,17 +60,17 @@ private void startAndAssertAndStopTask(ShardFollowNodeTask task, TestRun testRun assertThat(status.followerGlobalCheckpoint(), equalTo(testRun.finalExpectedGlobalCheckpoint)); final long numberOfFailedFetches = testRun.responses.values().stream().flatMap(List::stream).filter(f -> f.exception != null).count(); - assertThat(status.numberOfFailedFetches(), equalTo(numberOfFailedFetches)); + assertThat(status.failedReadRequests(), equalTo(numberOfFailedFetches)); // the failures were able to be retried so fetch failures should have cleared - assertThat(status.fetchExceptions().entrySet(), hasSize(0)); - assertThat(status.mappingVersion(), equalTo(testRun.finalMappingVersion)); + 
assertThat(status.readExceptions().entrySet(), hasSize(0)); + assertThat(status.followerMappingVersion(), equalTo(testRun.finalMappingVersion)); }); task.markAsCompleted(); assertBusy(() -> { ShardFollowNodeTaskStatus status = task.getStatus(); - assertThat(status.numberOfConcurrentReads(), equalTo(0)); - assertThat(status.numberOfConcurrentWrites(), equalTo(0)); + assertThat(status.outstandingReadRequests(), equalTo(0)); + assertThat(status.outstandingWriteRequests(), equalTo(0)); }); } @@ -79,10 +81,13 @@ private ShardFollowNodeTask createShardFollowTask(int concurrency, TestRun testR new ShardId("follow_index", "", 0), new ShardId("leader_index", "", 0), testRun.maxOperationCount, + TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, concurrency, - TransportResumeFollowAction.DEFAULT_MAX_BATCH_SIZE, + testRun.maxOperationCount, + TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, concurrency, 10240, + new ByteSizeValue(512, ByteSizeUnit.MB), TimeValue.timeValueMillis(10), TimeValue.timeValueMillis(10), Collections.emptyMap() diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskStatusTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskStatusTests.java index 6bd5136e4be56..95f8e86e09657 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskStatusTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskStatusTests.java @@ -57,6 +57,7 @@ protected ShardFollowNodeTaskStatus createTestInstance() { randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), + randomNonNegativeLong(), randomReadExceptions(), randomLong(), randomBoolean() ? 
new ElasticsearchException("fatal error") : null); @@ -65,7 +66,7 @@ protected ShardFollowNodeTaskStatus createTestInstance() { @Override protected void assertEqualInstances(final ShardFollowNodeTaskStatus expectedInstance, final ShardFollowNodeTaskStatus newInstance) { assertNotSame(expectedInstance, newInstance); - assertThat(newInstance.getLeaderCluster(), equalTo(expectedInstance.getLeaderCluster())); + assertThat(newInstance.getRemoteCluster(), equalTo(expectedInstance.getRemoteCluster())); assertThat(newInstance.leaderIndex(), equalTo(expectedInstance.leaderIndex())); assertThat(newInstance.followerIndex(), equalTo(expectedInstance.followerIndex())); assertThat(newInstance.getShardId(), equalTo(expectedInstance.getShardId())); @@ -73,23 +74,23 @@ protected void assertEqualInstances(final ShardFollowNodeTaskStatus expectedInst assertThat(newInstance.leaderMaxSeqNo(), equalTo(expectedInstance.leaderMaxSeqNo())); assertThat(newInstance.followerGlobalCheckpoint(), equalTo(expectedInstance.followerGlobalCheckpoint())); assertThat(newInstance.lastRequestedSeqNo(), equalTo(expectedInstance.lastRequestedSeqNo())); - assertThat(newInstance.numberOfConcurrentReads(), equalTo(expectedInstance.numberOfConcurrentReads())); - assertThat(newInstance.numberOfConcurrentWrites(), equalTo(expectedInstance.numberOfConcurrentWrites())); - assertThat(newInstance.numberOfQueuedWrites(), equalTo(expectedInstance.numberOfQueuedWrites())); - assertThat(newInstance.mappingVersion(), equalTo(expectedInstance.mappingVersion())); - assertThat(newInstance.totalFetchTimeMillis(), equalTo(expectedInstance.totalFetchTimeMillis())); - assertThat(newInstance.numberOfSuccessfulFetches(), equalTo(expectedInstance.numberOfSuccessfulFetches())); - assertThat(newInstance.numberOfFailedFetches(), equalTo(expectedInstance.numberOfFailedFetches())); - assertThat(newInstance.operationsReceived(), equalTo(expectedInstance.operationsReceived())); - assertThat(newInstance.totalTransferredBytes(), 
equalTo(expectedInstance.totalTransferredBytes())); - assertThat(newInstance.totalIndexTimeMillis(), equalTo(expectedInstance.totalIndexTimeMillis())); - assertThat(newInstance.numberOfSuccessfulBulkOperations(), equalTo(expectedInstance.numberOfSuccessfulBulkOperations())); - assertThat(newInstance.numberOfFailedBulkOperations(), equalTo(expectedInstance.numberOfFailedBulkOperations())); - assertThat(newInstance.numberOfOperationsIndexed(), equalTo(expectedInstance.numberOfOperationsIndexed())); - assertThat(newInstance.fetchExceptions().size(), equalTo(expectedInstance.fetchExceptions().size())); - assertThat(newInstance.fetchExceptions().keySet(), equalTo(expectedInstance.fetchExceptions().keySet())); - for (final Map.Entry> entry : newInstance.fetchExceptions().entrySet()) { - final Tuple expectedTuple = expectedInstance.fetchExceptions().get(entry.getKey()); + assertThat(newInstance.outstandingReadRequests(), equalTo(expectedInstance.outstandingReadRequests())); + assertThat(newInstance.outstandingWriteRequests(), equalTo(expectedInstance.outstandingWriteRequests())); + assertThat(newInstance.writeBufferOperationCount(), equalTo(expectedInstance.writeBufferOperationCount())); + assertThat(newInstance.followerMappingVersion(), equalTo(expectedInstance.followerMappingVersion())); + assertThat(newInstance.totalReadTimeMillis(), equalTo(expectedInstance.totalReadTimeMillis())); + assertThat(newInstance.successfulReadRequests(), equalTo(expectedInstance.successfulReadRequests())); + assertThat(newInstance.failedReadRequests(), equalTo(expectedInstance.failedReadRequests())); + assertThat(newInstance.operationsReads(), equalTo(expectedInstance.operationsReads())); + assertThat(newInstance.bytesRead(), equalTo(expectedInstance.bytesRead())); + assertThat(newInstance.totalWriteTimeMillis(), equalTo(expectedInstance.totalWriteTimeMillis())); + assertThat(newInstance.successfulWriteRequests(), equalTo(expectedInstance.successfulWriteRequests())); + 
assertThat(newInstance.failedWriteRequests(), equalTo(expectedInstance.failedWriteRequests())); + assertThat(newInstance.operationWritten(), equalTo(expectedInstance.operationWritten())); + assertThat(newInstance.readExceptions().size(), equalTo(expectedInstance.readExceptions().size())); + assertThat(newInstance.readExceptions().keySet(), equalTo(expectedInstance.readExceptions().keySet())); + for (final Map.Entry> entry : newInstance.readExceptions().entrySet()) { + final Tuple expectedTuple = expectedInstance.readExceptions().get(entry.getKey()); assertThat(entry.getValue().v1(), equalTo(expectedTuple.v1())); // x-content loses the exception final ElasticsearchException expected = expectedTuple.v2(); @@ -100,7 +101,7 @@ protected void assertEqualInstances(final ShardFollowNodeTaskStatus expectedInst anyOf(instanceOf(ElasticsearchException.class), instanceOf(IllegalStateException.class))); assertThat(entry.getValue().v2().getCause().getMessage(), containsString(expected.getCause().getMessage())); } - assertThat(newInstance.timeSinceLastFetchMillis(), equalTo(expectedInstance.timeSinceLastFetchMillis())); + assertThat(newInstance.timeSinceLastReadMillis(), equalTo(expectedInstance.timeSinceLastReadMillis())); } @Override diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java index 1988513c95d3b..aeac0ac451806 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java @@ -63,7 +63,12 @@ public class ShardFollowNodeTaskTests extends ESTestCase { private Queue responseSizes; public void testCoordinateReads() { - ShardFollowNodeTask task = createShardFollowTask(8, between(8, 20), between(1, 20), Integer.MAX_VALUE, Long.MAX_VALUE); + ShardFollowTaskParams params = 
new ShardFollowTaskParams(); + params.maxReadRequestOperationCount = 8; + params.maxOutstandingReadRequests = between(8, 20); + params.maxOutstandingWriteRequests = between(1, 20); + + ShardFollowNodeTask task = createShardFollowTask(params); startTask(task, 3, -1); task.coordinateReads(); assertThat(shardChangesRequests, contains(new long[]{0L, 8L})); // treat this a peak request @@ -73,13 +78,17 @@ public void testCoordinateReads() { {6L, 8L}, {14L, 8L}, {22L, 8L}, {30L, 8L}, {38L, 8L}, {46L, 8L}, {54L, 7L}} )); ShardFollowNodeTaskStatus status = task.getStatus(); - assertThat(status.numberOfConcurrentReads(), equalTo(7)); + assertThat(status.outstandingReadRequests(), equalTo(7)); assertThat(status.lastRequestedSeqNo(), equalTo(60L)); } - public void testWriteBuffer() { - // Need to set concurrentWrites to 0, other the write buffer gets flushed immediately: - ShardFollowNodeTask task = createShardFollowTask(64, 1, 0, 32, Long.MAX_VALUE); + public void testMaxWriteBufferCount() { + ShardFollowTaskParams params = new ShardFollowTaskParams(); + params.maxReadRequestOperationCount = 64; + params.maxOutstandingReadRequests = 1; + params.maxOutstandingWriteRequests = 0; // need to set outstandingWrites to 0, other the write buffer gets flushed immediately + params.maxWriteBufferCount = 32; + ShardFollowNodeTask task = createShardFollowTask(params); startTask(task, 63, -1); task.coordinateReads(); @@ -90,17 +99,47 @@ public void testWriteBuffer() { shardChangesRequests.clear(); // Also invokes the coordinatesReads() method: task.innerHandleReadResponse(0L, 63L, generateShardChangesResponse(0, 63, 0L, 128L)); - assertThat(shardChangesRequests.size(), equalTo(0)); // no more reads, because write buffer is full + assertThat(shardChangesRequests.size(), equalTo(0)); // no more reads, because write buffer count limit has been reached ShardFollowNodeTaskStatus status = task.getStatus(); - assertThat(status.numberOfConcurrentReads(), equalTo(0)); - 
assertThat(status.numberOfConcurrentWrites(), equalTo(0)); + assertThat(status.outstandingReadRequests(), equalTo(0)); + assertThat(status.outstandingWriteRequests(), equalTo(0)); assertThat(status.lastRequestedSeqNo(), equalTo(63L)); assertThat(status.leaderGlobalCheckpoint(), equalTo(128L)); } - public void testMaxConcurrentReads() { - ShardFollowNodeTask task = createShardFollowTask(8, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + public void testMaxWriteBufferSize() { + ShardFollowTaskParams params = new ShardFollowTaskParams(); + params.maxReadRequestOperationCount = 64; + params.maxOutstandingReadRequests = 1; + params.maxOutstandingWriteRequests = 0; // need to set outstandingWrites to 0, other the write buffer gets flushed immediately + params.maxWriteBufferSize = new ByteSizeValue(1, ByteSizeUnit.KB); + ShardFollowNodeTask task = createShardFollowTask(params); + startTask(task, 63, -1); + + task.coordinateReads(); + assertThat(shardChangesRequests.size(), equalTo(1)); + assertThat(shardChangesRequests.get(0)[0], equalTo(0L)); + assertThat(shardChangesRequests.get(0)[1], equalTo(64L)); + + shardChangesRequests.clear(); + // Also invokes the coordinatesReads() method: + task.innerHandleReadResponse(0L, 63L, generateShardChangesResponse(0, 63, 0L, 128L)); + assertThat(shardChangesRequests.size(), equalTo(0)); // no more reads, because write buffer size limit has been reached + + ShardFollowNodeTaskStatus status = task.getStatus(); + assertThat(status.outstandingReadRequests(), equalTo(0)); + assertThat(status.outstandingWriteRequests(), equalTo(0)); + assertThat(status.lastRequestedSeqNo(), equalTo(63L)); + assertThat(status.leaderGlobalCheckpoint(), equalTo(128L)); + } + + public void testMaxOutstandingReads() { + ShardFollowTaskParams params = new ShardFollowTaskParams(); + params.maxReadRequestOperationCount = 8; + params.maxOutstandingReadRequests = 1; + params.maxOutstandingWriteRequests = 1; + ShardFollowNodeTask task = createShardFollowTask(params); 
startTask(task, 64, -1); task.coordinateReads(); @@ -109,12 +148,16 @@ public void testMaxConcurrentReads() { assertThat(shardChangesRequests.get(0)[1], equalTo(8L)); ShardFollowNodeTaskStatus status = task.getStatus(); - assertThat(status.numberOfConcurrentReads(), equalTo(1)); + assertThat(status.outstandingReadRequests(), equalTo(1)); assertThat(status.lastRequestedSeqNo(), equalTo(7L)); } public void testTaskCancelled() { - ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + ShardFollowTaskParams params = new ShardFollowTaskParams(); + params.maxReadRequestOperationCount = 64; + params.maxOutstandingReadRequests = 1; + params.maxOutstandingWriteRequests = 1; + ShardFollowNodeTask task = createShardFollowTask(params); startTask(task, 64, -1); task.coordinateReads(); @@ -131,7 +174,11 @@ public void testTaskCancelled() { } public void testTaskCancelledAfterReadLimitHasBeenReached() { - ShardFollowNodeTask task = createShardFollowTask(16, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + ShardFollowTaskParams params = new ShardFollowTaskParams(); + params.maxReadRequestOperationCount = 16; + params.maxOutstandingReadRequests = 1; + params.maxOutstandingWriteRequests = 1; + ShardFollowNodeTask task = createShardFollowTask(params); startTask(task, 31, -1); task.coordinateReads(); @@ -147,15 +194,21 @@ public void testTaskCancelledAfterReadLimitHasBeenReached() { assertThat(bulkShardOperationRequests.size(), equalTo(0)); // no more writes, because task has been cancelled ShardFollowNodeTaskStatus status = task.getStatus(); - assertThat(status.numberOfConcurrentReads(), equalTo(0)); - assertThat(status.numberOfConcurrentWrites(), equalTo(0)); + assertThat(status.outstandingReadRequests(), equalTo(0)); + assertThat(status.outstandingWriteRequests(), equalTo(0)); assertThat(status.lastRequestedSeqNo(), equalTo(15L)); assertThat(status.leaderGlobalCheckpoint(), equalTo(31L)); assertThat(status.followerGlobalCheckpoint(), 
equalTo(-1L)); } public void testTaskCancelledAfterWriteBufferLimitHasBeenReached() { - ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, 32, Long.MAX_VALUE); + ShardFollowTaskParams params = new ShardFollowTaskParams(); + params.maxReadRequestOperationCount = 64; + params.maxOutstandingReadRequests = 1; + params.maxOutstandingWriteRequests = 1; + params.maxWriteBufferCount = 32; + + ShardFollowNodeTask task = createShardFollowTask(params); startTask(task, 64, -1); task.coordinateReads(); @@ -171,15 +224,19 @@ public void testTaskCancelledAfterWriteBufferLimitHasBeenReached() { assertThat(bulkShardOperationRequests.size(), equalTo(0)); // no more writes, because task has been cancelled ShardFollowNodeTaskStatus status = task.getStatus(); - assertThat(status.numberOfConcurrentReads(), equalTo(0)); - assertThat(status.numberOfConcurrentWrites(), equalTo(0)); + assertThat(status.outstandingReadRequests(), equalTo(0)); + assertThat(status.outstandingWriteRequests(), equalTo(0)); assertThat(status.lastRequestedSeqNo(), equalTo(63L)); assertThat(status.leaderGlobalCheckpoint(), equalTo(128L)); assertThat(status.followerGlobalCheckpoint(), equalTo(-1L)); } public void testReceiveRetryableError() { - ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + ShardFollowTaskParams params = new ShardFollowTaskParams(); + params.maxReadRequestOperationCount = 64; + params.maxOutstandingReadRequests = 1; + params.maxOutstandingWriteRequests = 1; + ShardFollowNodeTask task = createShardFollowTask(params); startTask(task, 63, -1); int max = randomIntBetween(1, 30); @@ -194,10 +251,10 @@ public void testReceiveRetryableError() { final AtomicLong retryCounter = new AtomicLong(); // before each retry, we assert the fetch failures; after the last retry, the fetch failure should clear beforeSendShardChangesRequest = status -> { - assertThat(status.numberOfFailedFetches(), equalTo(retryCounter.get())); + 
assertThat(status.failedReadRequests(), equalTo(retryCounter.get())); if (retryCounter.get() > 0) { - assertThat(status.fetchExceptions().entrySet(), hasSize(1)); - final Map.Entry> entry = status.fetchExceptions().entrySet().iterator().next(); + assertThat(status.readExceptions().entrySet(), hasSize(1)); + final Map.Entry> entry = status.readExceptions().entrySet().iterator().next(); assertThat(entry.getValue().v1(), equalTo(Math.toIntExact(retryCounter.get()))); assertThat(entry.getKey(), equalTo(0L)); assertThat(entry.getValue().v2(), instanceOf(ShardNotFoundException.class)); @@ -218,18 +275,22 @@ public void testReceiveRetryableError() { assertFalse("task is not stopped", task.isStopped()); ShardFollowNodeTaskStatus status = task.getStatus(); - assertThat(status.numberOfConcurrentReads(), equalTo(1)); - assertThat(status.numberOfConcurrentWrites(), equalTo(0)); - assertThat(status.numberOfFailedFetches(), equalTo((long)max)); - assertThat(status.numberOfSuccessfulFetches(), equalTo(1L)); + assertThat(status.outstandingReadRequests(), equalTo(1)); + assertThat(status.outstandingWriteRequests(), equalTo(0)); + assertThat(status.failedReadRequests(), equalTo((long)max)); + assertThat(status.successfulReadRequests(), equalTo(1L)); // the fetch failure has cleared - assertThat(status.fetchExceptions().entrySet(), hasSize(0)); + assertThat(status.readExceptions().entrySet(), hasSize(0)); assertThat(status.lastRequestedSeqNo(), equalTo(63L)); assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); } public void testEmptyShardChangesResponseShouldClearFetchException() { - ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + ShardFollowTaskParams params = new ShardFollowTaskParams(); + params.maxReadRequestOperationCount = 64; + params.maxOutstandingReadRequests = 1; + params.maxOutstandingWriteRequests = 1; + ShardFollowNodeTask task = createShardFollowTask(params); startTask(task, -1, -1); readFailures.add(new 
ShardNotFoundException(new ShardId("leader_index", "", 0))); @@ -248,17 +309,21 @@ public void testEmptyShardChangesResponseShouldClearFetchException() { assertFalse("task is not stopped", task.isStopped()); ShardFollowNodeTaskStatus status = task.getStatus(); - assertThat(status.numberOfConcurrentReads(), equalTo(1)); - assertThat(status.numberOfConcurrentWrites(), equalTo(0)); - assertThat(status.numberOfFailedFetches(), equalTo(1L)); + assertThat(status.outstandingReadRequests(), equalTo(1)); + assertThat(status.outstandingWriteRequests(), equalTo(0)); + assertThat(status.failedReadRequests(), equalTo(1L)); // the fetch failure should have been cleared: - assertThat(status.fetchExceptions().entrySet(), hasSize(0)); + assertThat(status.readExceptions().entrySet(), hasSize(0)); assertThat(status.lastRequestedSeqNo(), equalTo(-1L)); assertThat(status.leaderGlobalCheckpoint(), equalTo(-1L)); } public void testReceiveTimeout() { - final ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + ShardFollowTaskParams params = new ShardFollowTaskParams(); + params.maxReadRequestOperationCount = 64; + params.maxOutstandingReadRequests = 1; + params.maxOutstandingWriteRequests = 1; + final ShardFollowNodeTask task = createShardFollowTask(params); startTask(task, 63, -1); final int numberOfTimeouts = randomIntBetween(1, 32); @@ -272,14 +337,14 @@ public void testReceiveTimeout() { final AtomicInteger counter = new AtomicInteger(); beforeSendShardChangesRequest = status -> { if (counter.get() <= numberOfTimeouts) { - assertThat(status.numberOfSuccessfulFetches(), equalTo(0L)); - assertThat(status.totalFetchTimeMillis(), equalTo(0L)); - assertThat(status.operationsReceived(), equalTo(0L)); - assertThat(status.totalTransferredBytes(), equalTo(0L)); - - assertThat(status.fetchExceptions().entrySet(), hasSize(0)); - assertThat(status.totalFetchTimeMillis(), equalTo(0L)); - assertThat(status.numberOfFailedFetches(), equalTo(0L)); + 
assertThat(status.successfulReadRequests(), equalTo(0L)); + assertThat(status.totalReadTimeMillis(), equalTo(0L)); + assertThat(status.operationsReads(), equalTo(0L)); + assertThat(status.bytesRead(), equalTo(0L)); + + assertThat(status.readExceptions().entrySet(), hasSize(0)); + assertThat(status.totalReadTimeMillis(), equalTo(0L)); + assertThat(status.failedReadRequests(), equalTo(0L)); } else { // otherwise we will keep looping as if we were repeatedly polling and timing out simulateResponse.set(false); @@ -311,10 +376,10 @@ public void testReceiveTimeout() { assertThat(lastShardChangesRequest[1], equalTo(64L)); final ShardFollowNodeTaskStatus status = task.getStatus(); - assertThat(status.numberOfSuccessfulFetches(), equalTo(1L)); - assertThat(status.numberOfFailedFetches(), equalTo(0L)); - assertThat(status.numberOfConcurrentReads(), equalTo(1)); - assertThat(status.numberOfConcurrentWrites(), equalTo(1)); + assertThat(status.successfulReadRequests(), equalTo(1L)); + assertThat(status.failedReadRequests(), equalTo(0L)); + assertThat(status.outstandingReadRequests(), equalTo(1)); + assertThat(status.outstandingWriteRequests(), equalTo(1)); assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); assertThat(status.leaderMaxSeqNo(), equalTo(63L)); @@ -322,7 +387,11 @@ public void testReceiveTimeout() { } public void testReceiveNonRetryableError() { - ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + ShardFollowTaskParams params = new ShardFollowTaskParams(); + params.maxReadRequestOperationCount = 64; + params.maxOutstandingReadRequests = 1; + params.maxOutstandingWriteRequests = 1; + ShardFollowNodeTask task = createShardFollowTask(params); startTask(task, 63, -1); Exception failure = new RuntimeException("replication failed"); @@ -331,8 +400,8 @@ public void testReceiveNonRetryableError() { // since there will be only one failure, this should only be invoked once and there should not be a fetch failure 
beforeSendShardChangesRequest = status -> { if (invoked.compareAndSet(false, true)) { - assertThat(status.numberOfFailedFetches(), equalTo(0L)); - assertThat(status.fetchExceptions().entrySet(), hasSize(0)); + assertThat(status.failedReadRequests(), equalTo(0L)); + assertThat(status.readExceptions().entrySet(), hasSize(0)); } else { fail("invoked twice"); } @@ -346,11 +415,11 @@ public void testReceiveNonRetryableError() { assertTrue("task is stopped", task.isStopped()); assertThat(task.getStatus().getFatalException().getRootCause(), sameInstance(failure)); ShardFollowNodeTaskStatus status = task.getStatus(); - assertThat(status.numberOfConcurrentReads(), equalTo(1)); - assertThat(status.numberOfConcurrentWrites(), equalTo(0)); - assertThat(status.numberOfFailedFetches(), equalTo(1L)); - assertThat(status.fetchExceptions().entrySet(), hasSize(1)); - final Map.Entry> entry = status.fetchExceptions().entrySet().iterator().next(); + assertThat(status.outstandingReadRequests(), equalTo(1)); + assertThat(status.outstandingWriteRequests(), equalTo(0)); + assertThat(status.failedReadRequests(), equalTo(1L)); + assertThat(status.readExceptions().entrySet(), hasSize(1)); + final Map.Entry> entry = status.readExceptions().entrySet().iterator().next(); assertThat(entry.getKey(), equalTo(0L)); assertThat(entry.getValue().v2(), instanceOf(ElasticsearchException.class)); assertNotNull(entry.getValue().v2().getCause()); @@ -362,7 +431,11 @@ public void testReceiveNonRetryableError() { } public void testHandleReadResponse() { - ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + ShardFollowTaskParams params = new ShardFollowTaskParams(); + params.maxReadRequestOperationCount = 64; + params.maxOutstandingReadRequests = 1; + params.maxOutstandingWriteRequests = 1; + ShardFollowNodeTask task = createShardFollowTask(params); startTask(task, 63, -1); task.coordinateReads(); @@ -373,17 +446,21 @@ public void testHandleReadResponse() { 
assertThat(bulkShardOperationRequests.get(0), equalTo(Arrays.asList(response.getOperations()))); ShardFollowNodeTaskStatus status = task.getStatus(); - assertThat(status.mappingVersion(), equalTo(0L)); - assertThat(status.numberOfConcurrentReads(), equalTo(1)); - assertThat(status.numberOfConcurrentReads(), equalTo(1)); - assertThat(status.numberOfConcurrentWrites(), equalTo(1)); + assertThat(status.followerMappingVersion(), equalTo(0L)); + assertThat(status.outstandingReadRequests(), equalTo(1)); + assertThat(status.outstandingReadRequests(), equalTo(1)); + assertThat(status.outstandingWriteRequests(), equalTo(1)); assertThat(status.lastRequestedSeqNo(), equalTo(63L)); assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); assertThat(status.followerGlobalCheckpoint(), equalTo(-1L)); } public void testReceiveLessThanRequested() { - ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + ShardFollowTaskParams params = new ShardFollowTaskParams(); + params.maxReadRequestOperationCount = 64; + params.maxOutstandingReadRequests = 1; + params.maxOutstandingWriteRequests = 1; + ShardFollowNodeTask task = createShardFollowTask(params); startTask(task, 63, -1); task.coordinateReads(); @@ -400,14 +477,18 @@ public void testReceiveLessThanRequested() { assertThat(shardChangesRequests.get(0)[1], equalTo(43L)); ShardFollowNodeTaskStatus status = task.getStatus(); - assertThat(status.numberOfConcurrentReads(), equalTo(1)); - assertThat(status.numberOfConcurrentWrites(), equalTo(1)); + assertThat(status.outstandingReadRequests(), equalTo(1)); + assertThat(status.outstandingWriteRequests(), equalTo(1)); assertThat(status.lastRequestedSeqNo(), equalTo(63L)); assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); } public void testCancelAndReceiveLessThanRequested() { - ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + ShardFollowTaskParams params = new ShardFollowTaskParams(); + 
params.maxReadRequestOperationCount = 64; + params.maxOutstandingReadRequests = 1; + params.maxOutstandingWriteRequests = 1; + ShardFollowNodeTask task = createShardFollowTask(params); startTask(task, 63, -1); task.coordinateReads(); @@ -423,14 +504,18 @@ public void testCancelAndReceiveLessThanRequested() { assertThat(shardChangesRequests.size(), equalTo(0)); assertThat(bulkShardOperationRequests.size(), equalTo(0)); ShardFollowNodeTaskStatus status = task.getStatus(); - assertThat(status.numberOfConcurrentReads(), equalTo(0)); - assertThat(status.numberOfConcurrentWrites(), equalTo(0)); + assertThat(status.outstandingReadRequests(), equalTo(0)); + assertThat(status.outstandingWriteRequests(), equalTo(0)); assertThat(status.lastRequestedSeqNo(), equalTo(63L)); assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); } public void testReceiveNothingExpectedSomething() { - ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + ShardFollowTaskParams params = new ShardFollowTaskParams(); + params.maxReadRequestOperationCount = 64; + params.maxOutstandingReadRequests = 1; + params.maxOutstandingWriteRequests = 1; + ShardFollowNodeTask task = createShardFollowTask(params); startTask(task, 63, -1); task.coordinateReads(); @@ -446,14 +531,18 @@ public void testReceiveNothingExpectedSomething() { assertThat(shardChangesRequests.get(0)[1], equalTo(64L)); ShardFollowNodeTaskStatus status = task.getStatus(); - assertThat(status.numberOfConcurrentReads(), equalTo(1)); - assertThat(status.numberOfConcurrentWrites(), equalTo(0)); + assertThat(status.outstandingReadRequests(), equalTo(1)); + assertThat(status.outstandingWriteRequests(), equalTo(0)); assertThat(status.lastRequestedSeqNo(), equalTo(63L)); assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); } public void testMappingUpdate() { - ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + ShardFollowTaskParams params = new 
ShardFollowTaskParams(); + params.maxReadRequestOperationCount = 64; + params.maxOutstandingReadRequests = 1; + params.maxOutstandingWriteRequests = 1; + ShardFollowNodeTask task = createShardFollowTask(params); startTask(task, 63, -1); mappingVersions.add(1L); @@ -465,16 +554,20 @@ public void testMappingUpdate() { assertThat(bulkShardOperationRequests.get(0), equalTo(Arrays.asList(response.getOperations()))); ShardFollowNodeTaskStatus status = task.getStatus(); - assertThat(status.mappingVersion(), equalTo(1L)); - assertThat(status.numberOfConcurrentReads(), equalTo(1)); - assertThat(status.numberOfConcurrentWrites(), equalTo(1)); + assertThat(status.followerMappingVersion(), equalTo(1L)); + assertThat(status.outstandingReadRequests(), equalTo(1)); + assertThat(status.outstandingWriteRequests(), equalTo(1)); assertThat(status.lastRequestedSeqNo(), equalTo(63L)); assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); assertThat(status.followerGlobalCheckpoint(), equalTo(-1L)); } public void testMappingUpdateRetryableError() { - ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + ShardFollowTaskParams params = new ShardFollowTaskParams(); + params.maxReadRequestOperationCount = 64; + params.maxOutstandingReadRequests = 1; + params.maxOutstandingWriteRequests = 1; + ShardFollowNodeTask task = createShardFollowTask(params); startTask(task, 63, -1); int max = randomIntBetween(1, 30); @@ -490,16 +583,20 @@ public void testMappingUpdateRetryableError() { assertThat(bulkShardOperationRequests.size(), equalTo(1)); assertThat(task.isStopped(), equalTo(false)); ShardFollowNodeTaskStatus status = task.getStatus(); - assertThat(status.mappingVersion(), equalTo(1L)); - assertThat(status.numberOfConcurrentReads(), equalTo(1)); - assertThat(status.numberOfConcurrentWrites(), equalTo(1)); + assertThat(status.followerMappingVersion(), equalTo(1L)); + assertThat(status.outstandingReadRequests(), equalTo(1)); + 
assertThat(status.outstandingWriteRequests(), equalTo(1)); assertThat(status.lastRequestedSeqNo(), equalTo(63L)); assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); } public void testMappingUpdateNonRetryableError() { - ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + ShardFollowTaskParams params = new ShardFollowTaskParams(); + params.maxReadRequestOperationCount = 64; + params.maxOutstandingReadRequests = 1; + params.maxOutstandingWriteRequests = 1; + ShardFollowNodeTask task = createShardFollowTask(params); startTask(task, 63, -1); mappingUpdateFailures.add(new RuntimeException()); @@ -510,21 +607,27 @@ public void testMappingUpdateNonRetryableError() { assertThat(bulkShardOperationRequests.size(), equalTo(0)); assertThat(task.isStopped(), equalTo(true)); ShardFollowNodeTaskStatus status = task.getStatus(); - assertThat(status.mappingVersion(), equalTo(0L)); - assertThat(status.numberOfConcurrentReads(), equalTo(1)); - assertThat(status.numberOfConcurrentWrites(), equalTo(0)); + assertThat(status.followerMappingVersion(), equalTo(0L)); + assertThat(status.outstandingReadRequests(), equalTo(1)); + assertThat(status.outstandingWriteRequests(), equalTo(0)); assertThat(status.lastRequestedSeqNo(), equalTo(63L)); assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); } public void testCoordinateWrites() { - ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + ShardFollowTaskParams params = new ShardFollowTaskParams(); + params.maxReadRequestOperationCount = 128; + params.maxOutstandingReadRequests = 1; + params.maxWriteRequestOperationCount = 64; + params.maxOutstandingWriteRequests = 1; + + ShardFollowNodeTask task = createShardFollowTask(params); startTask(task, 63, -1); task.coordinateReads(); assertThat(shardChangesRequests.size(), equalTo(1)); assertThat(shardChangesRequests.get(0)[0], equalTo(0L)); - assertThat(shardChangesRequests.get(0)[1], equalTo(64L)); + 
assertThat(shardChangesRequests.get(0)[1], equalTo(128L)); ShardChangesAction.Response response = generateShardChangesResponse(0, 63, 0L, 63L); // Also invokes coordinatesWrites() @@ -534,15 +637,18 @@ public void testCoordinateWrites() { assertThat(bulkShardOperationRequests.get(0), equalTo(Arrays.asList(response.getOperations()))); ShardFollowNodeTaskStatus status = task.getStatus(); - assertThat(status.numberOfConcurrentReads(), equalTo(1)); - assertThat(status.numberOfConcurrentWrites(), equalTo(1)); + assertThat(status.outstandingReadRequests(), equalTo(1)); + assertThat(status.outstandingWriteRequests(), equalTo(1)); assertThat(status.lastRequestedSeqNo(), equalTo(63L)); assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); assertThat(status.followerGlobalCheckpoint(), equalTo(-1L)); } - public void testMaxConcurrentWrites() { - ShardFollowNodeTask task = createShardFollowTask(64, 1, 2, Integer.MAX_VALUE, Long.MAX_VALUE); + public void testMaxOutstandingWrites() { + ShardFollowTaskParams params = new ShardFollowTaskParams(); + params.maxWriteRequestOperationCount = 64; + params.maxOutstandingWriteRequests = 2; + ShardFollowNodeTask task = createShardFollowTask(params); ShardChangesAction.Response response = generateShardChangesResponse(0, 256, 0L, 256L); // Also invokes coordinatesWrites() task.innerHandleReadResponse(0L, 64L, response); @@ -552,9 +658,10 @@ public void testMaxConcurrentWrites() { assertThat(bulkShardOperationRequests.get(1), equalTo(Arrays.asList(response.getOperations()).subList(64, 128))); ShardFollowNodeTaskStatus status = task.getStatus(); - assertThat(status.numberOfConcurrentWrites(), equalTo(2)); + assertThat(status.outstandingWriteRequests(), equalTo(2)); - task = createShardFollowTask(64, 1, 4, Integer.MAX_VALUE, Long.MAX_VALUE); + params.maxOutstandingWriteRequests = 4; // change to 4 outstanding writers + task = createShardFollowTask(params); response = generateShardChangesResponse(0, 256, 0L, 256L); // Also invokes 
coordinatesWrites() task.innerHandleReadResponse(0L, 64L, response); @@ -566,11 +673,14 @@ public void testMaxConcurrentWrites() { assertThat(bulkShardOperationRequests.get(3), equalTo(Arrays.asList(response.getOperations()).subList(192, 256))); status = task.getStatus(); - assertThat(status.numberOfConcurrentWrites(), equalTo(4)); + assertThat(status.outstandingWriteRequests(), equalTo(4)); } - public void testMaxBatchOperationCount() { - ShardFollowNodeTask task = createShardFollowTask(8, 1, 32, Integer.MAX_VALUE, Long.MAX_VALUE); + public void testMaxWriteRequestCount() { + ShardFollowTaskParams params = new ShardFollowTaskParams(); + params.maxWriteRequestOperationCount = 8; + params.maxOutstandingWriteRequests = 32; + ShardFollowNodeTask task = createShardFollowTask(params); ShardChangesAction.Response response = generateShardChangesResponse(0, 256, 0L, 256L); // Also invokes coordinatesWrites() task.innerHandleReadResponse(0L, 64L, response); @@ -582,11 +692,15 @@ public void testMaxBatchOperationCount() { } ShardFollowNodeTaskStatus status = task.getStatus(); - assertThat(status.numberOfConcurrentWrites(), equalTo(32)); + assertThat(status.outstandingWriteRequests(), equalTo(32)); } public void testRetryableError() { - ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + ShardFollowTaskParams params = new ShardFollowTaskParams(); + params.maxReadRequestOperationCount = 64; + params.maxOutstandingReadRequests = 1; + params.maxOutstandingWriteRequests = 1; + ShardFollowNodeTask task = createShardFollowTask(params); startTask(task, 63, -1); task.coordinateReads(); @@ -609,12 +723,16 @@ public void testRetryableError() { } assertThat(task.isStopped(), equalTo(false)); ShardFollowNodeTaskStatus status = task.getStatus(); - assertThat(status.numberOfConcurrentWrites(), equalTo(1)); + assertThat(status.outstandingWriteRequests(), equalTo(1)); assertThat(status.followerGlobalCheckpoint(), equalTo(-1L)); } public void 
testNonRetryableError() { - ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + ShardFollowTaskParams params = new ShardFollowTaskParams(); + params.maxReadRequestOperationCount = 64; + params.maxOutstandingReadRequests = 1; + params.maxOutstandingWriteRequests = 1; + ShardFollowNodeTask task = createShardFollowTask(params); startTask(task, 63, -1); task.coordinateReads(); @@ -631,12 +749,18 @@ public void testNonRetryableError() { assertThat(bulkShardOperationRequests.get(0), equalTo(Arrays.asList(response.getOperations()))); assertThat(task.isStopped(), equalTo(true)); ShardFollowNodeTaskStatus status = task.getStatus(); - assertThat(status.numberOfConcurrentWrites(), equalTo(1)); + assertThat(status.outstandingWriteRequests(), equalTo(1)); assertThat(status.followerGlobalCheckpoint(), equalTo(-1L)); } - public void testMaxBatchBytesLimit() { - ShardFollowNodeTask task = createShardFollowTask(64, 1, 128, Integer.MAX_VALUE, 1L); + public void testMaxWriteRequestSize() { + ShardFollowTaskParams params = new ShardFollowTaskParams(); + params.maxReadRequestOperationCount = 64; + params.maxOutstandingReadRequests = 1; + params.maxWriteRequestSize = new ByteSizeValue(1, ByteSizeUnit.BYTES); + params.maxOutstandingWriteRequests = 128; + + ShardFollowNodeTask task = createShardFollowTask(params); startTask(task, 64, -1); task.coordinateReads(); @@ -652,7 +776,12 @@ public void testMaxBatchBytesLimit() { } public void testHandleWriteResponse() { - ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + ShardFollowTaskParams params = new ShardFollowTaskParams(); + params.maxReadRequestOperationCount = 64; + params.maxOutstandingReadRequests = 1; + params.maxWriteRequestOperationCount = 64; + params.maxOutstandingWriteRequests = 1; + ShardFollowNodeTask task = createShardFollowTask(params); startTask(task, 63, -1); task.coordinateReads(); @@ -675,7 +804,7 @@ public void 
testHandleWriteResponse() { assertThat(shardChangesRequests.get(0)[1], equalTo(64L)); ShardFollowNodeTaskStatus status = task.getStatus(); - assertThat(status.numberOfConcurrentReads(), equalTo(1)); + assertThat(status.outstandingReadRequests(), equalTo(1)); assertThat(status.lastRequestedSeqNo(), equalTo(63L)); assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); assertThat(status.followerGlobalCheckpoint(), equalTo(63L)); @@ -695,24 +824,40 @@ public void testComputeDelay() { assertThat(ShardFollowNodeTask.computeDelay(1024, maxDelayInMillis), allOf(greaterThanOrEqualTo(0L), lessThanOrEqualTo(1000L))); } - private ShardFollowNodeTask createShardFollowTask(int maxBatchOperationCount, - int maxConcurrentReadBatches, - int maxConcurrentWriteBatches, - int bufferWriteLimit, - long maxBatchSizeInBytes) { + static final class ShardFollowTaskParams { + private String remoteCluster = null; + private ShardId followShardId = new ShardId("follow_index", "", 0); + private ShardId leaderShardId = new ShardId("leader_index", "", 0); + private int maxReadRequestOperationCount = Integer.MAX_VALUE; + private ByteSizeValue maxReadRequestSize = new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES); + private int maxOutstandingReadRequests = Integer.MAX_VALUE; + private int maxWriteRequestOperationCount = Integer.MAX_VALUE; + private ByteSizeValue maxWriteRequestSize = new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES); + private int maxOutstandingWriteRequests = Integer.MAX_VALUE; + private int maxWriteBufferCount = Integer.MAX_VALUE; + private ByteSizeValue maxWriteBufferSize = new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES); + private TimeValue maxRetryDelay = TimeValue.ZERO; + private TimeValue readPollTimeout = TimeValue.ZERO; + private Map headers = Collections.emptyMap(); + } + + private ShardFollowNodeTask createShardFollowTask(ShardFollowTaskParams params) { AtomicBoolean stopped = new AtomicBoolean(false); - ShardFollowTask params = new ShardFollowTask( - 
null, - new ShardId("follow_index", "", 0), - new ShardId("leader_index", "", 0), - maxBatchOperationCount, - maxConcurrentReadBatches, - new ByteSizeValue(maxBatchSizeInBytes, ByteSizeUnit.BYTES), - maxConcurrentWriteBatches, - bufferWriteLimit, - TimeValue.ZERO, - TimeValue.ZERO, - Collections.emptyMap() + ShardFollowTask followTask = new ShardFollowTask( + params.remoteCluster, + params.followShardId, + params.leaderShardId, + params.maxReadRequestOperationCount, + params.maxReadRequestSize, + params.maxOutstandingReadRequests, + params.maxWriteRequestOperationCount, + params.maxWriteRequestSize, + params.maxOutstandingWriteRequests, + params.maxWriteBufferCount, + params.maxWriteBufferSize, + params.maxRetryDelay, + params.readPollTimeout, + params.headers ); shardChangesRequests = new ArrayList<>(); @@ -726,7 +871,7 @@ private ShardFollowNodeTask createShardFollowTask(int maxBatchOperationCount, maxSeqNos = new LinkedList<>(); responseSizes = new LinkedList<>(); return new ShardFollowNodeTask( - 1L, "type", ShardFollowTask.NAME, "description", null, Collections.emptyMap(), params, scheduler, System::nanoTime) { + 1L, "type", ShardFollowTask.NAME, "description", null, Collections.emptyMap(), followTask, scheduler, System::nanoTime) { @Override protected void innerUpdateMapping(LongConsumer handler, Consumer errorHandler) { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java index 96bc2f04f5920..d2f09c3900dfd 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java @@ -368,9 +368,13 @@ private ShardFollowNodeTask createShardFollowTask(ReplicationGroup leaderGroup, new ShardId("follow_index", "", 0), new ShardId("leader_index", 
"", 0), between(1, 64), + new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES), between(1, 8), + between(1, 64), new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES), - between(1, 4), 10240, + between(1, 4), + 10240, + new ByteSizeValue(512, ByteSizeUnit.MB), TimeValue.timeValueMillis(10), TimeValue.timeValueMillis(10), Collections.emptyMap() @@ -433,7 +437,7 @@ protected void innerSendShardChangesRequest(long from, int maxOperationCount, Co return; } Translog.Operation[] ops = ShardChangesAction.getOperations(indexShard, seqNoStats.getGlobalCheckpoint(), from, - maxOperationCount, recordedLeaderIndexHistoryUUID, params.getMaxBatchSize()); + maxOperationCount, recordedLeaderIndexHistoryUUID, params.getMaxReadRequestSize()); // hard code mapping version; this is ok, as mapping updates are not tested here final ShardChangesAction.Response response = new ShardChangesAction.Response( 1L, diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskTests.java index 865d18e606717..1dfe4a9897075 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskTests.java @@ -30,10 +30,13 @@ protected ShardFollowTask createTestInstance() { new ShardId(randomAlphaOfLength(4), randomAlphaOfLength(4), randomInt(5)), new ShardId(randomAlphaOfLength(4), randomAlphaOfLength(4), randomInt(5)), randomIntBetween(1, Integer.MAX_VALUE), + new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES), + randomIntBetween(1, Integer.MAX_VALUE), randomIntBetween(1, Integer.MAX_VALUE), new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES), randomIntBetween(1, Integer.MAX_VALUE), randomIntBetween(1, Integer.MAX_VALUE), + new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES), 
TimeValue.parseTimeValue(randomTimeValue(), ""), TimeValue.parseTimeValue(randomTimeValue(), ""), randomBoolean() ? null : Collections.singletonMap("key", "value") diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/StatsResponsesTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/StatsResponsesTests.java index c93da38666e22..e4830413dff3b 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/StatsResponsesTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/StatsResponsesTests.java @@ -50,6 +50,7 @@ protected FollowStatsAction.StatsResponses createTestInstance() { randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), + randomNonNegativeLong(), Collections.emptyNavigableMap(), randomLong(), randomBoolean() ? new ElasticsearchException("fatal error") : null); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternActionTests.java index 84df243bd9499..3f6b2fcb0e3ef 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternActionTests.java @@ -33,7 +33,7 @@ public void testInnerDelete() { List existingPatterns = new ArrayList<>(); existingPatterns.add("transactions-*"); existingAutoFollowPatterns.put("name1", - new AutoFollowPattern("eu_cluster", existingPatterns, null, null, null, null, null, null, null, null)); + new AutoFollowPattern("eu_cluster", existingPatterns, null, null, null, null, null, null, null, null, null, null, null)); List existingUUIDS = new ArrayList<>(); existingUUIDS.add("_val"); @@ -44,7 +44,7 @@ public void testInnerDelete() { List existingPatterns = new ArrayList<>(); 
existingPatterns.add("logs-*"); existingAutoFollowPatterns.put("name2", - new AutoFollowPattern("asia_cluster", existingPatterns, null, null, null, null, null, null, null, null)); + new AutoFollowPattern("asia_cluster", existingPatterns, null, null, null, null, null, null, null, null, null, null, null)); List existingUUIDS = new ArrayList<>(); existingUUIDS.add("_val"); @@ -63,7 +63,7 @@ public void testInnerDelete() { .custom(AutoFollowMetadata.TYPE); assertThat(result.getPatterns().size(), equalTo(1)); assertThat(result.getPatterns().get("name2"), notNullValue()); - assertThat(result.getPatterns().get("name2").getLeaderCluster(), equalTo("asia_cluster")); + assertThat(result.getPatterns().get("name2").getRemoteCluster(), equalTo("asia_cluster")); assertThat(result.getFollowedLeaderIndexUUIDs().size(), equalTo(1)); assertThat(result.getFollowedLeaderIndexUUIDs().get("name2"), notNullValue()); assertThat(result.getHeaders().size(), equalTo(1)); @@ -78,7 +78,7 @@ public void testInnerDeleteDoesNotExist() { List existingPatterns = new ArrayList<>(); existingPatterns.add("transactions-*"); existingAutoFollowPatterns.put("name1", - new AutoFollowPattern("eu_cluster", existingPatterns, null, null, null, null, null, null, null, null)); + new AutoFollowPattern("eu_cluster", existingPatterns, null, null, null, null, null, null, null, null, null, null, null)); existingHeaders.put("key", Collections.singletonMap("key", "val")); } ClusterState clusterState = ClusterState.builder(new ClusterName("us_cluster")) diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportGetAutoFollowPatternActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportGetAutoFollowPatternActionTests.java index ffc2d115091af..e2c7f327ab942 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportGetAutoFollowPatternActionTests.java +++ 
b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportGetAutoFollowPatternActionTests.java @@ -23,10 +23,10 @@ public class TransportGetAutoFollowPatternActionTests extends ESTestCase { public void testGetAutoFollowPattern() { Map patterns = new HashMap<>(); - patterns.put("name1", - new AutoFollowPattern("test_alias1", Collections.singletonList("index-*"), null, null, null, null, null, null, null, null)); - patterns.put("name2", - new AutoFollowPattern("test_alias1", Collections.singletonList("index-*"), null, null, null, null, null, null, null, null)); + patterns.put("name1", new AutoFollowPattern( + "test_alias1", Collections.singletonList("index-*"), null, null, null, null, null, null, null, null, null, null, null)); + patterns.put("name2", new AutoFollowPattern( + "test_alias1", Collections.singletonList("index-*"), null, null, null, null, null, null, null, null, null, null, null)); MetaData metaData = MetaData.builder() .putCustom(AutoFollowMetadata.TYPE, new AutoFollowMetadata(patterns, Collections.emptyMap(), Collections.emptyMap())) .build(); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternActionTests.java index 7c4368d317f24..ac556d47c85dd 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternActionTests.java @@ -30,7 +30,7 @@ public class TransportPutAutoFollowPatternActionTests extends ESTestCase { public void testInnerPut() { PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request(); request.setName("name1"); - request.setLeaderCluster("eu_cluster"); + request.setRemoteCluster("eu_cluster"); 
request.setLeaderIndexPatterns(Collections.singletonList("logs-*")); ClusterState localState = ClusterState.builder(new ClusterName("us_cluster")) @@ -45,7 +45,7 @@ public void testInnerPut() { AutoFollowMetadata autoFollowMetadata = result.metaData().custom(AutoFollowMetadata.TYPE); assertThat(autoFollowMetadata, notNullValue()); assertThat(autoFollowMetadata.getPatterns().size(), equalTo(1)); - assertThat(autoFollowMetadata.getPatterns().get("name1").getLeaderCluster(), equalTo("eu_cluster")); + assertThat(autoFollowMetadata.getPatterns().get("name1").getRemoteCluster(), equalTo("eu_cluster")); assertThat(autoFollowMetadata.getPatterns().get("name1").getLeaderIndexPatterns().size(), equalTo(1)); assertThat(autoFollowMetadata.getPatterns().get("name1").getLeaderIndexPatterns().get(0), equalTo("logs-*")); assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().size(), equalTo(1)); @@ -55,7 +55,7 @@ public void testInnerPut() { public void testInnerPut_existingLeaderIndices() { PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request(); request.setName("name1"); - request.setLeaderCluster("eu_cluster"); + request.setRemoteCluster("eu_cluster"); request.setLeaderIndexPatterns(Collections.singletonList("logs-*")); ClusterState localState = ClusterState.builder(new ClusterName("us_cluster")) @@ -86,7 +86,7 @@ public void testInnerPut_existingLeaderIndices() { AutoFollowMetadata autoFollowMetadata = result.metaData().custom(AutoFollowMetadata.TYPE); assertThat(autoFollowMetadata, notNullValue()); assertThat(autoFollowMetadata.getPatterns().size(), equalTo(1)); - assertThat(autoFollowMetadata.getPatterns().get("name1").getLeaderCluster(), equalTo("eu_cluster")); + assertThat(autoFollowMetadata.getPatterns().get("name1").getRemoteCluster(), equalTo("eu_cluster")); assertThat(autoFollowMetadata.getPatterns().get("name1").getLeaderIndexPatterns().size(), equalTo(1)); 
assertThat(autoFollowMetadata.getPatterns().get("name1").getLeaderIndexPatterns().get(0), equalTo("logs-*")); assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().size(), equalTo(1)); @@ -96,14 +96,14 @@ public void testInnerPut_existingLeaderIndices() { public void testInnerPut_existingLeaderIndicesAndAutoFollowMetadata() { PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request(); request.setName("name1"); - request.setLeaderCluster("eu_cluster"); + request.setRemoteCluster("eu_cluster"); request.setLeaderIndexPatterns(Arrays.asList("logs-*", "transactions-*")); Map existingAutoFollowPatterns = new HashMap<>(); List existingPatterns = new ArrayList<>(); existingPatterns.add("transactions-*"); existingAutoFollowPatterns.put("name1", - new AutoFollowPattern("eu_cluster", existingPatterns, null, null, null, null, null, null, null, null)); + new AutoFollowPattern("eu_cluster", existingPatterns, null, null, null, null, null, null, null, null, null, null, null)); Map> existingAlreadyFollowedIndexUUIDS = new HashMap<>(); List existingUUIDS = new ArrayList<>(); existingUUIDS.add("_val"); @@ -133,7 +133,7 @@ public void testInnerPut_existingLeaderIndicesAndAutoFollowMetadata() { AutoFollowMetadata autoFollowMetadata = result.metaData().custom(AutoFollowMetadata.TYPE); assertThat(autoFollowMetadata, notNullValue()); assertThat(autoFollowMetadata.getPatterns().size(), equalTo(1)); - assertThat(autoFollowMetadata.getPatterns().get("name1").getLeaderCluster(), equalTo("eu_cluster")); + assertThat(autoFollowMetadata.getPatterns().get("name1").getRemoteCluster(), equalTo("eu_cluster")); assertThat(autoFollowMetadata.getPatterns().get("name1").getLeaderIndexPatterns().size(), equalTo(2)); assertThat(autoFollowMetadata.getPatterns().get("name1").getLeaderIndexPatterns().get(0), equalTo("logs-*")); assertThat(autoFollowMetadata.getPatterns().get("name1").getLeaderIndexPatterns().get(1), equalTo("transactions-*")); diff --git 
a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowActionTests.java index 01f22723d14dc..5a4b41e3f456a 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowActionTests.java @@ -23,7 +23,6 @@ import java.util.HashMap; import java.util.Map; -import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; import static org.elasticsearch.xpack.ccr.action.TransportResumeFollowAction.validate; import static org.hamcrest.Matchers.equalTo; @@ -35,20 +34,8 @@ public void testValidation() throws IOException { customMetaData.put(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_SHARD_HISTORY_UUIDS, "uuid"); customMetaData.put(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_UUID_KEY, "_na_"); - ResumeFollowAction.Request request = IndexFollowingIT.resumeFollow("index1", "index2"); + ResumeFollowAction.Request request = IndexFollowingIT.resumeFollow("index2"); String[] UUIDs = new String[]{"uuid"}; - { - // should fail, because leader index does not exist - Exception e = expectThrows(IllegalArgumentException.class, () -> validate(request, null, null, null, null)); - assertThat(e.getMessage(), equalTo("leader index [leader_cluster:index1] does not exist")); - } - { - // should fail, because follow index does not exist - IndexMetaData leaderIMD = createIMD("index1", 5, Settings.EMPTY, emptyMap()); - Exception e = expectThrows(IllegalArgumentException.class, - () -> validate(request, leaderIMD, null, null, null)); - assertThat(e.getMessage(), equalTo("follow index [index2] does not exist")); - } { IndexMetaData leaderIMD = createIMD("index1", 5, Settings.EMPTY, null); IndexMetaData followIMD = createIMD("index2", 5, Settings.EMPTY, null); @@ -83,7 +70,7 @@ public void 
testValidation() throws IOException { IndexMetaData leaderIMD = createIMD("index1", 5, Settings.EMPTY, null); IndexMetaData followIMD = createIMD("index2", 5, Settings.EMPTY, customMetaData); Exception e = expectThrows(IllegalArgumentException.class, () -> validate(request, leaderIMD, followIMD, UUIDs, null)); - assertThat(e.getMessage(), equalTo("leader index [leader_cluster:index1] does not have soft deletes enabled")); + assertThat(e.getMessage(), equalTo("leader index [index1] does not have soft deletes enabled")); } { // should fail because the follower index does not have soft deletes enabled diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowActionTests.java index 07b0fc078aca0..4a201e37355a9 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowActionTests.java @@ -11,6 +11,8 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; @@ -77,10 +79,13 @@ public void testUnfollowRunningShardFollowTasks() { new ShardId("follow_index", "", 0), new ShardId("leader_index", "", 0), 1024, + TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, 1, - TransportResumeFollowAction.DEFAULT_MAX_BATCH_SIZE, + 1024, + TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, 1, 10240, + new ByteSizeValue(512, ByteSizeUnit.MB), TimeValue.timeValueMillis(10), TimeValue.timeValueMillis(10), Collections.emptyMap() diff 
--git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/FollowStatsMonitoringDocTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/FollowStatsMonitoringDocTests.java index 219bf7187baad..f3e0c2d5bd7b3 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/FollowStatsMonitoringDocTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/FollowStatsMonitoringDocTests.java @@ -91,23 +91,24 @@ public void testToXContent() throws IOException { final long lastRequestedSeqNo = randomNonNegativeLong(); final int numberOfConcurrentReads = randomIntBetween(1, Integer.MAX_VALUE); final int numberOfConcurrentWrites = randomIntBetween(1, Integer.MAX_VALUE); - final int numberOfQueuedWrites = randomIntBetween(0, Integer.MAX_VALUE); - final long mappingVersion = randomIntBetween(0, Integer.MAX_VALUE); - final long totalFetchTimeMillis = randomLongBetween(0, 4096); - final long totalFetchTookTimeMillis = randomLongBetween(0, 4096); - final long numberOfSuccessfulFetches = randomNonNegativeLong(); - final long numberOfFailedFetches = randomLongBetween(0, 8); - final long operationsReceived = randomNonNegativeLong(); - final long totalTransferredBytes = randomNonNegativeLong(); - final long totalIndexTimeMillis = randomNonNegativeLong(); - final long numberOfSuccessfulBulkOperations = randomNonNegativeLong(); - final long numberOfFailedBulkOperations = randomNonNegativeLong(); - final long numberOfOperationsIndexed = randomNonNegativeLong(); + final int writeBufferOperationCount = randomIntBetween(0, Integer.MAX_VALUE); + final long writeBufferSizeInBytes = randomNonNegativeLong(); + final long followerMappingVersion = randomIntBetween(0, Integer.MAX_VALUE); + final long totalReadTimeMillis = randomLongBetween(0, 4096); + final long totalReadRemoteExecTimeMillis = randomLongBetween(0, 4096); + final long successfulReadRequests = 
randomNonNegativeLong(); + final long failedReadRequests = randomLongBetween(0, 8); + final long operationsRead = randomNonNegativeLong(); + final long bytesRead = randomNonNegativeLong(); + final long totalWriteTimeMillis = randomNonNegativeLong(); + final long successfulWriteRequests = randomNonNegativeLong(); + final long failedWriteRequests = randomNonNegativeLong(); + final long operationWritten = randomNonNegativeLong(); final NavigableMap> fetchExceptions = new TreeMap<>(Collections.singletonMap( randomNonNegativeLong(), Tuple.tuple(randomIntBetween(0, Integer.MAX_VALUE), new ElasticsearchException("shard is sad")))); - final long timeSinceLastFetchMillis = randomNonNegativeLong(); + final long timeSinceLastReadMillis = randomNonNegativeLong(); final ShardFollowNodeTaskStatus status = new ShardFollowNodeTaskStatus( "leader_cluster", "leader_index", @@ -120,20 +121,21 @@ public void testToXContent() throws IOException { lastRequestedSeqNo, numberOfConcurrentReads, numberOfConcurrentWrites, - numberOfQueuedWrites, - mappingVersion, - totalFetchTimeMillis, - totalFetchTookTimeMillis, - numberOfSuccessfulFetches, - numberOfFailedFetches, - operationsReceived, - totalTransferredBytes, - totalIndexTimeMillis, - numberOfSuccessfulBulkOperations, - numberOfFailedBulkOperations, - numberOfOperationsIndexed, + writeBufferOperationCount, + writeBufferSizeInBytes, + followerMappingVersion, + totalReadTimeMillis, + totalReadRemoteExecTimeMillis, + successfulReadRequests, + failedReadRequests, + operationsRead, + bytesRead, + totalWriteTimeMillis, + successfulWriteRequests, + failedWriteRequests, + operationWritten, fetchExceptions, - timeSinceLastFetchMillis, + timeSinceLastReadMillis, new ElasticsearchException("fatal error")); final FollowStatsMonitoringDoc document = new FollowStatsMonitoringDoc("_cluster", timestamp, intervalMillis, node, status); final BytesReference xContent = XContentHelper.toXContent(document, XContentType.JSON, false); @@ -154,7 +156,7 @@ public 
void testToXContent() throws IOException { + "\"timestamp\":\"" + new DateTime(nodeTimestamp, DateTimeZone.UTC).toString() + "\"" + "}," + "\"ccr_stats\":{" - + "\"leader_cluster\":\"leader_cluster\"," + + "\"remote_cluster\":\"leader_cluster\"," + "\"leader_index\":\"leader_index\"," + "\"follower_index\":\"follower_index\"," + "\"shard_id\":" + shardId + "," @@ -163,21 +165,22 @@ public void testToXContent() throws IOException { + "\"follower_global_checkpoint\":" + followerGlobalCheckpoint + "," + "\"follower_max_seq_no\":" + followerMaxSeqNo + "," + "\"last_requested_seq_no\":" + lastRequestedSeqNo + "," - + "\"number_of_concurrent_reads\":" + numberOfConcurrentReads + "," - + "\"number_of_concurrent_writes\":" + numberOfConcurrentWrites + "," - + "\"number_of_queued_writes\":" + numberOfQueuedWrites + "," - + "\"mapping_version\":" + mappingVersion + "," - + "\"total_fetch_time_millis\":" + totalFetchTimeMillis + "," - + "\"total_fetch_leader_time_millis\":" + totalFetchTookTimeMillis + "," - + "\"number_of_successful_fetches\":" + numberOfSuccessfulFetches + "," - + "\"number_of_failed_fetches\":" + numberOfFailedFetches + "," - + "\"operations_received\":" + operationsReceived + "," - + "\"total_transferred_bytes\":" + totalTransferredBytes + "," - + "\"total_index_time_millis\":" + totalIndexTimeMillis +"," - + "\"number_of_successful_bulk_operations\":" + numberOfSuccessfulBulkOperations + "," - + "\"number_of_failed_bulk_operations\":" + numberOfFailedBulkOperations + "," - + "\"number_of_operations_indexed\":" + numberOfOperationsIndexed + "," - + "\"fetch_exceptions\":[" + + "\"outstanding_read_requests\":" + numberOfConcurrentReads + "," + + "\"outstanding_write_requests\":" + numberOfConcurrentWrites + "," + + "\"write_buffer_operation_count\":" + writeBufferOperationCount + "," + + "\"write_buffer_size_in_bytes\":" + writeBufferSizeInBytes + "," + + "\"follower_mapping_version\":" + followerMappingVersion + "," + + "\"total_read_time_millis\":" + 
totalReadTimeMillis + "," + + "\"total_read_remote_exec_time_millis\":" + totalReadRemoteExecTimeMillis + "," + + "\"successful_read_requests\":" + successfulReadRequests + "," + + "\"failed_read_requests\":" + failedReadRequests + "," + + "\"operations_read\":" + operationsRead + "," + + "\"bytes_read\":" + bytesRead + "," + + "\"total_write_time_millis\":" + totalWriteTimeMillis +"," + + "\"successful_write_requests\":" + successfulWriteRequests + "," + + "\"failed_write_requests\":" + failedWriteRequests + "," + + "\"operations_written\":" + operationWritten + "," + + "\"read_exceptions\":[" + "{" + "\"from_seq_no\":" + fetchExceptions.keySet().iterator().next() + "," + "\"retries\":" + fetchExceptions.values().iterator().next().v1() + "," @@ -187,7 +190,7 @@ public void testToXContent() throws IOException { + "}" + "}" + "]," - + "\"time_since_last_fetch_millis\":" + timeSinceLastFetchMillis + "," + + "\"time_since_last_read_millis\":" + timeSinceLastReadMillis + "," + "\"fatal_exception\":{\"type\":\"exception\",\"reason\":\"fatal error\"}" + "}" + "}")); @@ -197,7 +200,7 @@ public void testShardFollowNodeTaskStatusFieldsMapped() throws IOException { final NavigableMap> fetchExceptions = new TreeMap<>(Collections.singletonMap(1L, Tuple.tuple(2, new ElasticsearchException("shard is sad")))); final ShardFollowNodeTaskStatus status = new ShardFollowNodeTaskStatus( - "leader_cluster", + "remote_cluster", "leader_index", "follower_index", 0, @@ -210,6 +213,7 @@ public void testShardFollowNodeTaskStatusFieldsMapped() throws IOException { 1, 1, 1, + 1, 100, 50, 10, @@ -234,7 +238,7 @@ public void testShardFollowNodeTaskStatusFieldsMapped() throws IOException { for (Map.Entry entry : serializedStatus.entrySet()) { String fieldName = entry.getKey(); Map fieldMapping = (Map) followStatsMapping.get(fieldName); - assertThat(fieldMapping, notNullValue()); + assertThat("no field mapping for field [" + fieldName + "]", fieldMapping, notNullValue()); Object fieldValue = 
entry.getValue(); String fieldType = (String) fieldMapping.get("type"); @@ -246,7 +250,7 @@ public void testShardFollowNodeTaskStatusFieldsMapped() throws IOException { anyOf(equalTo("keyword"), equalTo("text"))); } else { // Manual test specific object fields and if not just fail: - if (fieldName.equals("fetch_exceptions")) { + if (fieldName.equals("read_exceptions")) { assertThat(fieldType, equalTo("nested")); assertThat(((Map) fieldMapping.get("properties")).size(), equalTo(3)); assertThat(XContentMapValues.extractValue("properties.from_seq_no.type", fieldMapping), equalTo("long")); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java index 1fe4ebf08503a..242a925ab1c4d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java @@ -239,7 +239,8 @@ private static String[] sqlAcknowledgementMessages(OperationMode currentMode, Op switch (currentMode) { case TRIAL: case PLATINUM: - return new String[] { "JDBC support will be disabled, but you can continue to use SQL CLI and REST endpoint" }; + return new String[] { + "JDBC and ODBC support will be disabled, but you can continue to use SQL CLI and REST endpoint" }; } break; } @@ -628,6 +629,20 @@ public synchronized boolean isJdbcAllowed() { return licensed && localStatus.active; } + /** + * Determine if ODBC support should be enabled. + *

+ * ODBC is available only in for {@link OperationMode#PLATINUM} and {@link OperationMode#TRIAL} licences + */ + public synchronized boolean isOdbcAllowed() { + Status localStatus = status; + OperationMode operationMode = localStatus.mode; + + boolean licensed = operationMode == OperationMode.TRIAL || operationMode == OperationMode.PLATINUM; + + return licensed && localStatus.active; + } + public synchronized boolean isTrialLicense() { return status.mode == OperationMode.TRIAL; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java index 8f01c56c3f064..379dbe7a421b6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java @@ -175,87 +175,112 @@ public int hashCode() { public static class AutoFollowPattern implements Writeable, ToXContentObject { - public static final ParseField LEADER_CLUSTER_FIELD = new ParseField("leader_cluster"); + public static final ParseField REMOTE_CLUSTER_FIELD = new ParseField("remote_cluster"); public static final ParseField LEADER_PATTERNS_FIELD = new ParseField("leader_index_patterns"); public static final ParseField FOLLOW_PATTERN_FIELD = new ParseField("follow_index_pattern"); - public static final ParseField MAX_BATCH_OPERATION_COUNT = new ParseField("max_batch_operation_count"); - public static final ParseField MAX_CONCURRENT_READ_BATCHES = new ParseField("max_concurrent_read_batches"); - public static final ParseField MAX_BATCH_SIZE = new ParseField("max_batch_size"); - public static final ParseField MAX_CONCURRENT_WRITE_BATCHES = new ParseField("max_concurrent_write_batches"); + public static final ParseField MAX_READ_REQUEST_OPERATION_COUNT = new ParseField("max_read_request_operation_count"); + public static final ParseField MAX_READ_REQUEST_SIZE = 
new ParseField("max_read_request_size"); + public static final ParseField MAX_OUTSTANDING_READ_REQUESTS = new ParseField("max_outstanding_read_requests"); + public static final ParseField MAX_WRITE_REQUEST_OPERATION_COUNT = new ParseField("max_write_request_operation_count"); + public static final ParseField MAX_WRITE_REQUEST_SIZE = new ParseField("max_write_request_size"); + public static final ParseField MAX_OUTSTANDING_WRITE_REQUESTS = new ParseField("max_outstanding_write_requests"); + public static final ParseField MAX_WRITE_BUFFER_COUNT = new ParseField("max_write_buffer_count"); public static final ParseField MAX_WRITE_BUFFER_SIZE = new ParseField("max_write_buffer_size"); public static final ParseField MAX_RETRY_DELAY = new ParseField("max_retry_delay"); - public static final ParseField POLL_TIMEOUT = new ParseField("poll_timeout"); + public static final ParseField READ_POLL_TIMEOUT = new ParseField("read_poll_timeout"); @SuppressWarnings("unchecked") private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("auto_follow_pattern", args -> new AutoFollowPattern((String) args[0], (List) args[1], (String) args[2], (Integer) args[3], - (Integer) args[4], (ByteSizeValue) args[5], (Integer) args[6], (Integer) args[7], (TimeValue) args[8], - (TimeValue) args[9])); + (ByteSizeValue) args[4], (Integer) args[5], (Integer) args[6], (ByteSizeValue) args[7], (Integer) args[8], + (Integer) args[9], (ByteSizeValue) args[10], (TimeValue) args[11], (TimeValue) args[12])); static { - PARSER.declareString(ConstructingObjectParser.constructorArg(), LEADER_CLUSTER_FIELD); + PARSER.declareString(ConstructingObjectParser.constructorArg(), REMOTE_CLUSTER_FIELD); PARSER.declareStringArray(ConstructingObjectParser.constructorArg(), LEADER_PATTERNS_FIELD); PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), FOLLOW_PATTERN_FIELD); - PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_BATCH_OPERATION_COUNT); - 
PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_CONCURRENT_READ_BATCHES); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_READ_REQUEST_OPERATION_COUNT); PARSER.declareField( ConstructingObjectParser.optionalConstructorArg(), - (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_BATCH_SIZE.getPreferredName()), - MAX_BATCH_SIZE, + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_READ_REQUEST_SIZE.getPreferredName()), + MAX_READ_REQUEST_SIZE, + ObjectParser.ValueType.STRING); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_OUTSTANDING_READ_REQUESTS); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_WRITE_REQUEST_OPERATION_COUNT); + PARSER.declareField( + ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_WRITE_REQUEST_SIZE.getPreferredName()), + MAX_WRITE_REQUEST_SIZE, + ObjectParser.ValueType.STRING); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_OUTSTANDING_WRITE_REQUESTS); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_WRITE_BUFFER_COUNT); + PARSER.declareField( + ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_WRITE_BUFFER_SIZE.getPreferredName()), + MAX_WRITE_BUFFER_SIZE, ObjectParser.ValueType.STRING); - PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_CONCURRENT_WRITE_BATCHES); - PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_WRITE_BUFFER_SIZE); PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> TimeValue.parseTimeValue(p.text(), MAX_RETRY_DELAY.getPreferredName()), MAX_RETRY_DELAY, ObjectParser.ValueType.STRING); PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), - (p, c) -> TimeValue.parseTimeValue(p.text(), POLL_TIMEOUT.getPreferredName()), - POLL_TIMEOUT, 
ObjectParser.ValueType.STRING); + (p, c) -> TimeValue.parseTimeValue(p.text(), READ_POLL_TIMEOUT.getPreferredName()), + READ_POLL_TIMEOUT, ObjectParser.ValueType.STRING); } - private final String leaderCluster; + private final String remoteCluster; private final List leaderIndexPatterns; private final String followIndexPattern; - private final Integer maxBatchOperationCount; - private final Integer maxConcurrentReadBatches; - private final ByteSizeValue maxBatchSize; - private final Integer maxConcurrentWriteBatches; - private final Integer maxWriteBufferSize; + private final Integer maxReadRequestOperationCount; + private final ByteSizeValue maxReadRequestSize; + private final Integer maxOutstandingReadRequests; + private final Integer maxWriteRequestOperationCount; + private final ByteSizeValue maxWriteRequestSize; + private final Integer maxOutstandingWriteRequests; + private final Integer maxWriteBufferCount; + private final ByteSizeValue maxWriteBufferSize; private final TimeValue maxRetryDelay; private final TimeValue pollTimeout; - public AutoFollowPattern(String leaderCluster, + public AutoFollowPattern(String remoteCluster, List leaderIndexPatterns, String followIndexPattern, - Integer maxBatchOperationCount, - Integer maxConcurrentReadBatches, - ByteSizeValue maxBatchSize, - Integer maxConcurrentWriteBatches, - Integer maxWriteBufferSize, - TimeValue maxRetryDelay, + Integer maxReadRequestOperationCount, + ByteSizeValue maxReadRequestSize, + Integer maxOutstandingReadRequests, + Integer maxWriteRequestOperationCount, + ByteSizeValue maxWriteRequestSize, + Integer maxOutstandingWriteRequests, + Integer maxWriteBufferCount, + ByteSizeValue maxWriteBufferSize, TimeValue maxRetryDelay, TimeValue pollTimeout) { - this.leaderCluster = leaderCluster; + this.remoteCluster = remoteCluster; this.leaderIndexPatterns = leaderIndexPatterns; this.followIndexPattern = followIndexPattern; - this.maxBatchOperationCount = maxBatchOperationCount; - 
this.maxConcurrentReadBatches = maxConcurrentReadBatches; - this.maxBatchSize = maxBatchSize; - this.maxConcurrentWriteBatches = maxConcurrentWriteBatches; + this.maxReadRequestOperationCount = maxReadRequestOperationCount; + this.maxReadRequestSize = maxReadRequestSize; + this.maxOutstandingReadRequests = maxOutstandingReadRequests; + this.maxWriteRequestOperationCount = maxWriteRequestOperationCount; + this.maxWriteRequestSize = maxWriteRequestSize; + this.maxOutstandingWriteRequests = maxOutstandingWriteRequests; + this.maxWriteBufferCount = maxWriteBufferCount; this.maxWriteBufferSize = maxWriteBufferSize; this.maxRetryDelay = maxRetryDelay; this.pollTimeout = pollTimeout; } public AutoFollowPattern(StreamInput in) throws IOException { - leaderCluster = in.readString(); + remoteCluster = in.readString(); leaderIndexPatterns = in.readList(StreamInput::readString); followIndexPattern = in.readOptionalString(); - maxBatchOperationCount = in.readOptionalVInt(); - maxConcurrentReadBatches = in.readOptionalVInt(); - maxBatchSize = in.readOptionalWriteable(ByteSizeValue::new); - maxConcurrentWriteBatches = in.readOptionalVInt(); - maxWriteBufferSize = in.readOptionalVInt(); + maxReadRequestOperationCount = in.readOptionalVInt(); + maxReadRequestSize = in.readOptionalWriteable(ByteSizeValue::new); + maxOutstandingReadRequests = in.readOptionalVInt(); + maxWriteRequestOperationCount = in.readOptionalVInt(); + maxWriteRequestSize = in.readOptionalWriteable(ByteSizeValue::new); + maxOutstandingWriteRequests = in.readOptionalVInt(); + maxWriteBufferCount = in.readOptionalVInt(); + maxWriteBufferSize = in.readOptionalWriteable(ByteSizeValue::new); maxRetryDelay = in.readOptionalTimeValue(); pollTimeout = in.readOptionalTimeValue(); } @@ -268,8 +293,8 @@ public static boolean match(List leaderIndexPatterns, String indexName) return Regex.simpleMatch(leaderIndexPatterns, indexName); } - public String getLeaderCluster() { - return leaderCluster; + public String 
getRemoteCluster() { + return remoteCluster; } public List getLeaderIndexPatterns() { @@ -280,23 +305,35 @@ public String getFollowIndexPattern() { return followIndexPattern; } - public Integer getMaxBatchOperationCount() { - return maxBatchOperationCount; + public Integer getMaxReadRequestOperationCount() { + return maxReadRequestOperationCount; } - public Integer getMaxConcurrentReadBatches() { - return maxConcurrentReadBatches; + public Integer getMaxOutstandingReadRequests() { + return maxOutstandingReadRequests; } - public ByteSizeValue getMaxBatchSize() { - return maxBatchSize; + public ByteSizeValue getMaxReadRequestSize() { + return maxReadRequestSize; } - public Integer getMaxConcurrentWriteBatches() { - return maxConcurrentWriteBatches; + public Integer getMaxWriteRequestOperationCount() { + return maxWriteRequestOperationCount; } - public Integer getMaxWriteBufferSize() { + public ByteSizeValue getMaxWriteRequestSize() { + return maxWriteRequestSize; + } + + public Integer getMaxOutstandingWriteRequests() { + return maxOutstandingWriteRequests; + } + + public Integer getMaxWriteBufferCount() { + return maxWriteBufferCount; + } + + public ByteSizeValue getMaxWriteBufferSize() { return maxWriteBufferSize; } @@ -310,45 +347,57 @@ public TimeValue getPollTimeout() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeString(leaderCluster); + out.writeString(remoteCluster); out.writeStringList(leaderIndexPatterns); out.writeOptionalString(followIndexPattern); - out.writeOptionalVInt(maxBatchOperationCount); - out.writeOptionalVInt(maxConcurrentReadBatches); - out.writeOptionalWriteable(maxBatchSize); - out.writeOptionalVInt(maxConcurrentWriteBatches); - out.writeOptionalVInt(maxWriteBufferSize); + out.writeOptionalVInt(maxReadRequestOperationCount); + out.writeOptionalWriteable(maxReadRequestSize); + out.writeOptionalVInt(maxOutstandingReadRequests); + out.writeOptionalVInt(maxWriteRequestOperationCount); + 
out.writeOptionalWriteable(maxWriteRequestSize); + out.writeOptionalVInt(maxOutstandingWriteRequests); + out.writeOptionalVInt(maxWriteBufferCount); + out.writeOptionalWriteable(maxWriteBufferSize); out.writeOptionalTimeValue(maxRetryDelay); out.writeOptionalTimeValue(pollTimeout); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(LEADER_CLUSTER_FIELD.getPreferredName(), leaderCluster); + builder.field(REMOTE_CLUSTER_FIELD.getPreferredName(), remoteCluster); builder.array(LEADER_PATTERNS_FIELD.getPreferredName(), leaderIndexPatterns.toArray(new String[0])); if (followIndexPattern != null) { builder.field(FOLLOW_PATTERN_FIELD.getPreferredName(), followIndexPattern); } - if (maxBatchOperationCount != null) { - builder.field(MAX_BATCH_OPERATION_COUNT.getPreferredName(), maxBatchOperationCount); + if (maxReadRequestOperationCount != null) { + builder.field(MAX_READ_REQUEST_OPERATION_COUNT.getPreferredName(), maxReadRequestOperationCount); + } + if (maxReadRequestSize != null) { + builder.field(MAX_READ_REQUEST_SIZE.getPreferredName(), maxReadRequestSize.getStringRep()); + } + if (maxOutstandingReadRequests != null) { + builder.field(MAX_OUTSTANDING_READ_REQUESTS.getPreferredName(), maxOutstandingReadRequests); + } + if (maxWriteRequestOperationCount != null) { + builder.field(MAX_WRITE_REQUEST_OPERATION_COUNT.getPreferredName(), maxWriteRequestOperationCount); } - if (maxConcurrentReadBatches != null) { - builder.field(MAX_CONCURRENT_READ_BATCHES.getPreferredName(), maxConcurrentReadBatches); + if (maxWriteRequestSize != null) { + builder.field(MAX_WRITE_REQUEST_SIZE.getPreferredName(), maxWriteRequestSize.getStringRep()); } - if (maxBatchSize != null) { - builder.field(MAX_BATCH_SIZE.getPreferredName(), maxBatchSize.getStringRep()); + if (maxOutstandingWriteRequests != null) { + builder.field(MAX_OUTSTANDING_WRITE_REQUESTS.getPreferredName(), maxOutstandingWriteRequests); } - if 
(maxConcurrentWriteBatches != null) { - builder.field(MAX_CONCURRENT_WRITE_BATCHES.getPreferredName(), maxConcurrentWriteBatches); + if (maxWriteBufferCount != null){ + builder.field(MAX_WRITE_BUFFER_COUNT.getPreferredName(), maxWriteBufferCount); } - if (maxWriteBufferSize != null){ - builder.field(MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize); + if (maxWriteBufferSize != null) { + builder.field(MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize.getStringRep()); } if (maxRetryDelay != null) { builder.field(MAX_RETRY_DELAY.getPreferredName(), maxRetryDelay); } if (pollTimeout != null) { - builder.field(POLL_TIMEOUT.getPreferredName(), pollTimeout); + builder.field(READ_POLL_TIMEOUT.getPreferredName(), pollTimeout); } return builder; } @@ -363,13 +412,16 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; AutoFollowPattern that = (AutoFollowPattern) o; - return Objects.equals(leaderCluster, that.leaderCluster) && + return Objects.equals(remoteCluster, that.remoteCluster) && Objects.equals(leaderIndexPatterns, that.leaderIndexPatterns) && Objects.equals(followIndexPattern, that.followIndexPattern) && - Objects.equals(maxBatchOperationCount, that.maxBatchOperationCount) && - Objects.equals(maxConcurrentReadBatches, that.maxConcurrentReadBatches) && - Objects.equals(maxBatchSize, that.maxBatchSize) && - Objects.equals(maxConcurrentWriteBatches, that.maxConcurrentWriteBatches) && + Objects.equals(maxReadRequestOperationCount, that.maxReadRequestOperationCount) && + Objects.equals(maxReadRequestSize, that.maxReadRequestSize) && + Objects.equals(maxOutstandingReadRequests, that.maxOutstandingReadRequests) && + Objects.equals(maxWriteRequestOperationCount, that.maxWriteRequestOperationCount) && + Objects.equals(maxWriteRequestSize, that.maxWriteRequestSize) && + Objects.equals(maxOutstandingWriteRequests, that.maxOutstandingWriteRequests) && + 
Objects.equals(maxWriteBufferCount, that.maxWriteBufferCount) && Objects.equals(maxWriteBufferSize, that.maxWriteBufferSize) && Objects.equals(maxRetryDelay, that.maxRetryDelay) && Objects.equals(pollTimeout, that.pollTimeout); @@ -378,13 +430,16 @@ public boolean equals(Object o) { @Override public int hashCode() { return Objects.hash( - leaderCluster, + remoteCluster, leaderIndexPatterns, followIndexPattern, - maxBatchOperationCount, - maxConcurrentReadBatches, - maxBatchSize, - maxConcurrentWriteBatches, + maxReadRequestOperationCount, + maxReadRequestSize, + maxOutstandingReadRequests, + maxWriteRequestOperationCount, + maxWriteRequestSize, + maxOutstandingWriteRequests, + maxWriteBufferCount, maxWriteBufferSize, maxRetryDelay, pollTimeout); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/ShardFollowNodeTaskStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/ShardFollowNodeTaskStatus.java index e21729df58b54..b8f645eea4430 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/ShardFollowNodeTaskStatus.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/ShardFollowNodeTaskStatus.java @@ -34,7 +34,7 @@ public class ShardFollowNodeTaskStatus implements Task.Status { public static final String STATUS_PARSER_NAME = "shard-follow-node-task-status"; - private static final ParseField LEADER_CLUSTER = new ParseField("leader_cluster"); + private static final ParseField LEADER_CLUSTER = new ParseField("remote_cluster"); private static final ParseField LEADER_INDEX = new ParseField("leader_index"); private static final ParseField FOLLOWER_INDEX = new ParseField("follower_index"); private static final ParseField SHARD_ID = new ParseField("shard_id"); @@ -43,22 +43,23 @@ public class ShardFollowNodeTaskStatus implements Task.Status { private static final ParseField FOLLOWER_GLOBAL_CHECKPOINT_FIELD = new ParseField("follower_global_checkpoint"); private static final 
ParseField FOLLOWER_MAX_SEQ_NO_FIELD = new ParseField("follower_max_seq_no"); private static final ParseField LAST_REQUESTED_SEQ_NO_FIELD = new ParseField("last_requested_seq_no"); - private static final ParseField NUMBER_OF_CONCURRENT_READS_FIELD = new ParseField("number_of_concurrent_reads"); - private static final ParseField NUMBER_OF_CONCURRENT_WRITES_FIELD = new ParseField("number_of_concurrent_writes"); - private static final ParseField NUMBER_OF_QUEUED_WRITES_FIELD = new ParseField("number_of_queued_writes"); - private static final ParseField MAPPING_VERSION_FIELD = new ParseField("mapping_version"); - private static final ParseField TOTAL_FETCH_TIME_MILLIS_FIELD = new ParseField("total_fetch_time_millis"); - private static final ParseField TOTAL_FETCH_LEADER_TIME_MILLIS_FIELD = new ParseField("total_fetch_leader_time_millis"); - private static final ParseField NUMBER_OF_SUCCESSFUL_FETCHES_FIELD = new ParseField("number_of_successful_fetches"); - private static final ParseField NUMBER_OF_FAILED_FETCHES_FIELD = new ParseField("number_of_failed_fetches"); - private static final ParseField OPERATIONS_RECEIVED_FIELD = new ParseField("operations_received"); - private static final ParseField TOTAL_TRANSFERRED_BYTES = new ParseField("total_transferred_bytes"); - private static final ParseField TOTAL_INDEX_TIME_MILLIS_FIELD = new ParseField("total_index_time_millis"); - private static final ParseField NUMBER_OF_SUCCESSFUL_BULK_OPERATIONS_FIELD = new ParseField("number_of_successful_bulk_operations"); - private static final ParseField NUMBER_OF_FAILED_BULK_OPERATIONS_FIELD = new ParseField("number_of_failed_bulk_operations"); - private static final ParseField NUMBER_OF_OPERATIONS_INDEXED_FIELD = new ParseField("number_of_operations_indexed"); - private static final ParseField FETCH_EXCEPTIONS = new ParseField("fetch_exceptions"); - private static final ParseField TIME_SINCE_LAST_FETCH_MILLIS_FIELD = new ParseField("time_since_last_fetch_millis"); + private static 
final ParseField OUTSTANDING_READ_REQUESTS = new ParseField("outstanding_read_requests"); + private static final ParseField OUTSTANDING_WRITE_REQUESTS = new ParseField("outstanding_write_requests"); + private static final ParseField WRITE_BUFFER_OPERATION_COUNT_FIELD = new ParseField("write_buffer_operation_count"); + private static final ParseField WRITE_BUFFER_SIZE_IN_BYTES_FIELD = new ParseField("write_buffer_size_in_bytes"); + private static final ParseField FOLLOWER_MAPPING_VERSION_FIELD = new ParseField("follower_mapping_version"); + private static final ParseField TOTAL_READ_TIME_MILLIS_FIELD = new ParseField("total_read_time_millis"); + private static final ParseField TOTAL_READ_REMOTE_EXEC_TIME_MILLIS_FIELD = new ParseField("total_read_remote_exec_time_millis"); + private static final ParseField SUCCESSFUL_READ_REQUESTS_FIELD = new ParseField("successful_read_requests"); + private static final ParseField FAILED_READ_REQUESTS_FIELD = new ParseField("failed_read_requests"); + private static final ParseField OPERATIONS_READ_FIELD = new ParseField("operations_read"); + private static final ParseField BYTES_READ = new ParseField("bytes_read"); + private static final ParseField TOTAL_WRITE_TIME_MILLIS_FIELD = new ParseField("total_write_time_millis"); + private static final ParseField SUCCESSFUL_WRITE_REQUESTS_FIELD = new ParseField("successful_write_requests"); + private static final ParseField FAILED_WRITE_REQUEST_FIELD = new ParseField("failed_write_requests"); + private static final ParseField OPERATIONS_WRITTEN = new ParseField("operations_written"); + private static final ParseField READ_EXCEPTIONS = new ParseField("read_exceptions"); + private static final ParseField TIME_SINCE_LAST_READ_MILLIS_FIELD = new ParseField("time_since_last_read_millis"); private static final ParseField FATAL_EXCEPTION = new ParseField("fatal_exception"); @SuppressWarnings("unchecked") @@ -89,18 +90,19 @@ public class ShardFollowNodeTaskStatus implements Task.Status { (long) 
args[20], (long) args[21], (long) args[22], + (long) args[23], new TreeMap<>( - ((List>>) args[23]) + ((List>>) args[24]) .stream() .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))), - (long) args[24], - (ElasticsearchException) args[25])); + (long) args[25], + (ElasticsearchException) args[26])); - public static final String FETCH_EXCEPTIONS_ENTRY_PARSER_NAME = "shard-follow-node-task-status-fetch-exceptions-entry"; + public static final String READ_EXCEPTIONS_ENTRY_PARSER_NAME = "shard-follow-node-task-status-read-exceptions-entry"; - static final ConstructingObjectParser>, Void> FETCH_EXCEPTIONS_ENTRY_PARSER = + static final ConstructingObjectParser>, Void> READ_EXCEPTIONS_ENTRY_PARSER = new ConstructingObjectParser<>( - FETCH_EXCEPTIONS_ENTRY_PARSER_NAME, + READ_EXCEPTIONS_ENTRY_PARSER_NAME, args -> new AbstractMap.SimpleEntry<>((long) args[0], Tuple.tuple((Integer)args[1], (ElasticsearchException)args[2]))); static { @@ -113,44 +115,45 @@ public class ShardFollowNodeTaskStatus implements Task.Status { STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), FOLLOWER_GLOBAL_CHECKPOINT_FIELD); STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), FOLLOWER_MAX_SEQ_NO_FIELD); STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), LAST_REQUESTED_SEQ_NO_FIELD); - STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_CONCURRENT_READS_FIELD); - STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_CONCURRENT_WRITES_FIELD); - STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_QUEUED_WRITES_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), MAPPING_VERSION_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_FETCH_TIME_MILLIS_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_FETCH_LEADER_TIME_MILLIS_FIELD); - 
STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_SUCCESSFUL_FETCHES_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_FAILED_FETCHES_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), OPERATIONS_RECEIVED_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_TRANSFERRED_BYTES); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_INDEX_TIME_MILLIS_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_SUCCESSFUL_BULK_OPERATIONS_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_FAILED_BULK_OPERATIONS_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_OPERATIONS_INDEXED_FIELD); - STATUS_PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), FETCH_EXCEPTIONS_ENTRY_PARSER, FETCH_EXCEPTIONS); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TIME_SINCE_LAST_FETCH_MILLIS_FIELD); + STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), OUTSTANDING_READ_REQUESTS); + STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), OUTSTANDING_WRITE_REQUESTS); + STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), WRITE_BUFFER_OPERATION_COUNT_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), WRITE_BUFFER_SIZE_IN_BYTES_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), FOLLOWER_MAPPING_VERSION_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_READ_TIME_MILLIS_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_READ_REMOTE_EXEC_TIME_MILLIS_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), SUCCESSFUL_READ_REQUESTS_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), 
FAILED_READ_REQUESTS_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), OPERATIONS_READ_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), BYTES_READ); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_WRITE_TIME_MILLIS_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), SUCCESSFUL_WRITE_REQUESTS_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), FAILED_WRITE_REQUEST_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), OPERATIONS_WRITTEN); + STATUS_PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), READ_EXCEPTIONS_ENTRY_PARSER, READ_EXCEPTIONS); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TIME_SINCE_LAST_READ_MILLIS_FIELD); STATUS_PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> ElasticsearchException.fromXContent(p), FATAL_EXCEPTION); } - static final ParseField FETCH_EXCEPTIONS_ENTRY_FROM_SEQ_NO = new ParseField("from_seq_no"); - static final ParseField FETCH_EXCEPTIONS_RETRIES = new ParseField("retries"); - static final ParseField FETCH_EXCEPTIONS_ENTRY_EXCEPTION = new ParseField("exception"); + static final ParseField READ_EXCEPTIONS_ENTRY_FROM_SEQ_NO = new ParseField("from_seq_no"); + static final ParseField READ_EXCEPTIONS_RETRIES = new ParseField("retries"); + static final ParseField READ_EXCEPTIONS_ENTRY_EXCEPTION = new ParseField("exception"); static { - FETCH_EXCEPTIONS_ENTRY_PARSER.declareLong(ConstructingObjectParser.constructorArg(), FETCH_EXCEPTIONS_ENTRY_FROM_SEQ_NO); - FETCH_EXCEPTIONS_ENTRY_PARSER.declareInt(ConstructingObjectParser.constructorArg(), FETCH_EXCEPTIONS_RETRIES); - FETCH_EXCEPTIONS_ENTRY_PARSER.declareObject( + READ_EXCEPTIONS_ENTRY_PARSER.declareLong(ConstructingObjectParser.constructorArg(), READ_EXCEPTIONS_ENTRY_FROM_SEQ_NO); + 
READ_EXCEPTIONS_ENTRY_PARSER.declareInt(ConstructingObjectParser.constructorArg(), READ_EXCEPTIONS_RETRIES); + READ_EXCEPTIONS_ENTRY_PARSER.declareObject( ConstructingObjectParser.constructorArg(), (p, c) -> ElasticsearchException.fromXContent(p), - FETCH_EXCEPTIONS_ENTRY_EXCEPTION); + READ_EXCEPTIONS_ENTRY_EXCEPTION); } - private final String leaderCluster; + private final String remoteCluster; - public String getLeaderCluster() { - return leaderCluster; + public String getRemoteCluster() { + return remoteCluster; } private final String leaderIndex; @@ -201,100 +204,106 @@ public long lastRequestedSeqNo() { return lastRequestedSeqNo; } - private final int numberOfConcurrentReads; + private final int outstandingReadRequests; - public int numberOfConcurrentReads() { - return numberOfConcurrentReads; + public int outstandingReadRequests() { + return outstandingReadRequests; } - private final int numberOfConcurrentWrites; + private final int outstandingWriteRequests; - public int numberOfConcurrentWrites() { - return numberOfConcurrentWrites; + public int outstandingWriteRequests() { + return outstandingWriteRequests; } - private final int numberOfQueuedWrites; + private final int writeBufferOperationCount; - public int numberOfQueuedWrites() { - return numberOfQueuedWrites; + public int writeBufferOperationCount() { + return writeBufferOperationCount; } - private final long mappingVersion; + private final long writeBufferSizeInBytes; - public long mappingVersion() { - return mappingVersion; + public long writeBufferSizeInBytes() { + return writeBufferSizeInBytes; } - private final long totalFetchTimeMillis; + private final long followerMappingVersion; - public long totalFetchTimeMillis() { - return totalFetchTimeMillis; + public long followerMappingVersion() { + return followerMappingVersion; } - private final long totalFetchLeaderTimeMillis; + private final long totalReadTimeMillis; - public long totalFetchLeaderTimeMillis() { - return totalFetchLeaderTimeMillis; + 
public long totalReadTimeMillis() { + return totalReadTimeMillis; } - private final long numberOfSuccessfulFetches; + private final long totalReadRemoteExecTimeMillis; - public long numberOfSuccessfulFetches() { - return numberOfSuccessfulFetches; + public long totalReadRemoteExecTimeMillis() { + return totalReadRemoteExecTimeMillis; } - private final long numberOfFailedFetches; + private final long successfulReadRequests; - public long numberOfFailedFetches() { - return numberOfFailedFetches; + public long successfulReadRequests() { + return successfulReadRequests; } - private final long operationsReceived; + private final long failedReadRequests; - public long operationsReceived() { - return operationsReceived; + public long failedReadRequests() { + return failedReadRequests; } - private final long totalTransferredBytes; + private final long operationsReads; - public long totalTransferredBytes() { - return totalTransferredBytes; + public long operationsReads() { + return operationsReads; } - private final long totalIndexTimeMillis; + private final long bytesRead; - public long totalIndexTimeMillis() { - return totalIndexTimeMillis; + public long bytesRead() { + return bytesRead; } - private final long numberOfSuccessfulBulkOperations; + private final long totalWriteTimeMillis; - public long numberOfSuccessfulBulkOperations() { - return numberOfSuccessfulBulkOperations; + public long totalWriteTimeMillis() { + return totalWriteTimeMillis; } - private final long numberOfFailedBulkOperations; + private final long successfulWriteRequests; - public long numberOfFailedBulkOperations() { - return numberOfFailedBulkOperations; + public long successfulWriteRequests() { + return successfulWriteRequests; } - private final long numberOfOperationsIndexed; + private final long failedWriteRequests; - public long numberOfOperationsIndexed() { - return numberOfOperationsIndexed; + public long failedWriteRequests() { + return failedWriteRequests; } - private final NavigableMap> 
fetchExceptions; + private final long operationWritten; - public NavigableMap> fetchExceptions() { - return fetchExceptions; + public long operationWritten() { + return operationWritten; } - private final long timeSinceLastFetchMillis; + private final NavigableMap> readExceptions; - public long timeSinceLastFetchMillis() { - return timeSinceLastFetchMillis; + public NavigableMap> readExceptions() { + return readExceptions; + } + + private final long timeSinceLastReadMillis; + + public long timeSinceLastReadMillis() { + return timeSinceLastReadMillis; } private final ElasticsearchException fatalException; @@ -304,7 +313,7 @@ public ElasticsearchException getFatalException() { } public ShardFollowNodeTaskStatus( - final String leaderCluster, + final String remoteCluster, final String leaderIndex, final String followerIndex, final int shardId, @@ -313,24 +322,25 @@ public ShardFollowNodeTaskStatus( final long followerGlobalCheckpoint, final long followerMaxSeqNo, final long lastRequestedSeqNo, - final int numberOfConcurrentReads, - final int numberOfConcurrentWrites, - final int numberOfQueuedWrites, - final long mappingVersion, - final long totalFetchTimeMillis, - final long totalFetchLeaderTimeMillis, - final long numberOfSuccessfulFetches, - final long numberOfFailedFetches, - final long operationsReceived, - final long totalTransferredBytes, - final long totalIndexTimeMillis, - final long numberOfSuccessfulBulkOperations, - final long numberOfFailedBulkOperations, - final long numberOfOperationsIndexed, - final NavigableMap> fetchExceptions, - final long timeSinceLastFetchMillis, + final int outstandingReadRequests, + final int outstandingWriteRequests, + final int writeBufferOperationCount, + final long writeBufferSizeInBytes, + final long followerMappingVersion, + final long totalReadTimeMillis, + final long totalReadRemoteExecTimeMillis, + final long successfulReadRequests, + final long failedReadRequests, + final long operationsReads, + final long bytesRead, + 
final long totalWriteTimeMillis, + final long successfulWriteRequests, + final long failedWriteRequests, + final long operationWritten, + final NavigableMap> readExceptions, + final long timeSinceLastReadMillis, final ElasticsearchException fatalException) { - this.leaderCluster = leaderCluster; + this.remoteCluster = remoteCluster; this.leaderIndex = leaderIndex; this.followerIndex = followerIndex; this.shardId = shardId; @@ -339,27 +349,28 @@ public ShardFollowNodeTaskStatus( this.followerGlobalCheckpoint = followerGlobalCheckpoint; this.followerMaxSeqNo = followerMaxSeqNo; this.lastRequestedSeqNo = lastRequestedSeqNo; - this.numberOfConcurrentReads = numberOfConcurrentReads; - this.numberOfConcurrentWrites = numberOfConcurrentWrites; - this.numberOfQueuedWrites = numberOfQueuedWrites; - this.mappingVersion = mappingVersion; - this.totalFetchTimeMillis = totalFetchTimeMillis; - this.totalFetchLeaderTimeMillis = totalFetchLeaderTimeMillis; - this.numberOfSuccessfulFetches = numberOfSuccessfulFetches; - this.numberOfFailedFetches = numberOfFailedFetches; - this.operationsReceived = operationsReceived; - this.totalTransferredBytes = totalTransferredBytes; - this.totalIndexTimeMillis = totalIndexTimeMillis; - this.numberOfSuccessfulBulkOperations = numberOfSuccessfulBulkOperations; - this.numberOfFailedBulkOperations = numberOfFailedBulkOperations; - this.numberOfOperationsIndexed = numberOfOperationsIndexed; - this.fetchExceptions = Objects.requireNonNull(fetchExceptions); - this.timeSinceLastFetchMillis = timeSinceLastFetchMillis; + this.outstandingReadRequests = outstandingReadRequests; + this.outstandingWriteRequests = outstandingWriteRequests; + this.writeBufferOperationCount = writeBufferOperationCount; + this.writeBufferSizeInBytes = writeBufferSizeInBytes; + this.followerMappingVersion = followerMappingVersion; + this.totalReadTimeMillis = totalReadTimeMillis; + this.totalReadRemoteExecTimeMillis = totalReadRemoteExecTimeMillis; + this.successfulReadRequests 
= successfulReadRequests; + this.failedReadRequests = failedReadRequests; + this.operationsReads = operationsReads; + this.bytesRead = bytesRead; + this.totalWriteTimeMillis = totalWriteTimeMillis; + this.successfulWriteRequests = successfulWriteRequests; + this.failedWriteRequests = failedWriteRequests; + this.operationWritten = operationWritten; + this.readExceptions = Objects.requireNonNull(readExceptions); + this.timeSinceLastReadMillis = timeSinceLastReadMillis; this.fatalException = fatalException; } public ShardFollowNodeTaskStatus(final StreamInput in) throws IOException { - this.leaderCluster = in.readOptionalString(); + this.remoteCluster = in.readOptionalString(); this.leaderIndex = in.readString(); this.followerIndex = in.readString(); this.shardId = in.readVInt(); @@ -368,23 +379,24 @@ public ShardFollowNodeTaskStatus(final StreamInput in) throws IOException { this.followerGlobalCheckpoint = in.readZLong(); this.followerMaxSeqNo = in.readZLong(); this.lastRequestedSeqNo = in.readZLong(); - this.numberOfConcurrentReads = in.readVInt(); - this.numberOfConcurrentWrites = in.readVInt(); - this.numberOfQueuedWrites = in.readVInt(); - this.mappingVersion = in.readVLong(); - this.totalFetchTimeMillis = in.readVLong(); - this.totalFetchLeaderTimeMillis = in.readVLong(); - this.numberOfSuccessfulFetches = in.readVLong(); - this.numberOfFailedFetches = in.readVLong(); - this.operationsReceived = in.readVLong(); - this.totalTransferredBytes = in.readVLong(); - this.totalIndexTimeMillis = in.readVLong(); - this.numberOfSuccessfulBulkOperations = in.readVLong(); - this.numberOfFailedBulkOperations = in.readVLong(); - this.numberOfOperationsIndexed = in.readVLong(); - this.fetchExceptions = + this.outstandingReadRequests = in.readVInt(); + this.outstandingWriteRequests = in.readVInt(); + this.writeBufferOperationCount = in.readVInt(); + this.writeBufferSizeInBytes = in.readVLong(); + this.followerMappingVersion = in.readVLong(); + this.totalReadTimeMillis = 
in.readVLong(); + this.totalReadRemoteExecTimeMillis = in.readVLong(); + this.successfulReadRequests = in.readVLong(); + this.failedReadRequests = in.readVLong(); + this.operationsReads = in.readVLong(); + this.bytesRead = in.readVLong(); + this.totalWriteTimeMillis = in.readVLong(); + this.successfulWriteRequests = in.readVLong(); + this.failedWriteRequests = in.readVLong(); + this.operationWritten = in.readVLong(); + this.readExceptions = new TreeMap<>(in.readMap(StreamInput::readVLong, stream -> Tuple.tuple(stream.readVInt(), stream.readException()))); - this.timeSinceLastFetchMillis = in.readZLong(); + this.timeSinceLastReadMillis = in.readZLong(); this.fatalException = in.readException(); } @@ -395,7 +407,7 @@ public String getWriteableName() { @Override public void writeTo(final StreamOutput out) throws IOException { - out.writeOptionalString(leaderCluster); + out.writeOptionalString(remoteCluster); out.writeString(leaderIndex); out.writeString(followerIndex); out.writeVInt(shardId); @@ -404,28 +416,29 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeZLong(followerGlobalCheckpoint); out.writeZLong(followerMaxSeqNo); out.writeZLong(lastRequestedSeqNo); - out.writeVInt(numberOfConcurrentReads); - out.writeVInt(numberOfConcurrentWrites); - out.writeVInt(numberOfQueuedWrites); - out.writeVLong(mappingVersion); - out.writeVLong(totalFetchTimeMillis); - out.writeVLong(totalFetchLeaderTimeMillis); - out.writeVLong(numberOfSuccessfulFetches); - out.writeVLong(numberOfFailedFetches); - out.writeVLong(operationsReceived); - out.writeVLong(totalTransferredBytes); - out.writeVLong(totalIndexTimeMillis); - out.writeVLong(numberOfSuccessfulBulkOperations); - out.writeVLong(numberOfFailedBulkOperations); - out.writeVLong(numberOfOperationsIndexed); + out.writeVInt(outstandingReadRequests); + out.writeVInt(outstandingWriteRequests); + out.writeVInt(writeBufferOperationCount); + out.writeVLong(writeBufferSizeInBytes); + 
out.writeVLong(followerMappingVersion); + out.writeVLong(totalReadTimeMillis); + out.writeVLong(totalReadRemoteExecTimeMillis); + out.writeVLong(successfulReadRequests); + out.writeVLong(failedReadRequests); + out.writeVLong(operationsReads); + out.writeVLong(bytesRead); + out.writeVLong(totalWriteTimeMillis); + out.writeVLong(successfulWriteRequests); + out.writeVLong(failedWriteRequests); + out.writeVLong(operationWritten); out.writeMap( - fetchExceptions, + readExceptions, StreamOutput::writeVLong, (stream, value) -> { stream.writeVInt(value.v1()); stream.writeException(value.v2()); }); - out.writeZLong(timeSinceLastFetchMillis); + out.writeZLong(timeSinceLastReadMillis); out.writeException(fatalException); } @@ -440,7 +453,7 @@ public XContentBuilder toXContent(final XContentBuilder builder, final Params pa } public XContentBuilder toXContentFragment(final XContentBuilder builder, final Params params) throws IOException { - builder.field(LEADER_CLUSTER.getPreferredName(), leaderCluster); + builder.field(LEADER_CLUSTER.getPreferredName(), remoteCluster); builder.field(LEADER_INDEX.getPreferredName(), leaderIndex); builder.field(FOLLOWER_INDEX.getPreferredName(), followerIndex); builder.field(SHARD_ID.getPreferredName(), shardId); @@ -449,40 +462,44 @@ public XContentBuilder toXContentFragment(final XContentBuilder builder, final P builder.field(FOLLOWER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(), followerGlobalCheckpoint); builder.field(FOLLOWER_MAX_SEQ_NO_FIELD.getPreferredName(), followerMaxSeqNo); builder.field(LAST_REQUESTED_SEQ_NO_FIELD.getPreferredName(), lastRequestedSeqNo); - builder.field(NUMBER_OF_CONCURRENT_READS_FIELD.getPreferredName(), numberOfConcurrentReads); - builder.field(NUMBER_OF_CONCURRENT_WRITES_FIELD.getPreferredName(), numberOfConcurrentWrites); - builder.field(NUMBER_OF_QUEUED_WRITES_FIELD.getPreferredName(), numberOfQueuedWrites); - builder.field(MAPPING_VERSION_FIELD.getPreferredName(), mappingVersion); + 
builder.field(OUTSTANDING_READ_REQUESTS.getPreferredName(), outstandingReadRequests); + builder.field(OUTSTANDING_WRITE_REQUESTS.getPreferredName(), outstandingWriteRequests); + builder.field(WRITE_BUFFER_OPERATION_COUNT_FIELD.getPreferredName(), writeBufferOperationCount); + builder.humanReadableField( + WRITE_BUFFER_SIZE_IN_BYTES_FIELD.getPreferredName(), + "write_buffer_size", + new ByteSizeValue(writeBufferSizeInBytes)); + builder.field(FOLLOWER_MAPPING_VERSION_FIELD.getPreferredName(), followerMappingVersion); builder.humanReadableField( - TOTAL_FETCH_TIME_MILLIS_FIELD.getPreferredName(), - "total_fetch_time", - new TimeValue(totalFetchTimeMillis, TimeUnit.MILLISECONDS)); + TOTAL_READ_TIME_MILLIS_FIELD.getPreferredName(), + "total_read_time", + new TimeValue(totalReadTimeMillis, TimeUnit.MILLISECONDS)); builder.humanReadableField( - TOTAL_FETCH_LEADER_TIME_MILLIS_FIELD.getPreferredName(), - "total_fetch_leader_time", - new TimeValue(totalFetchLeaderTimeMillis, TimeUnit.MILLISECONDS)); - builder.field(NUMBER_OF_SUCCESSFUL_FETCHES_FIELD.getPreferredName(), numberOfSuccessfulFetches); - builder.field(NUMBER_OF_FAILED_FETCHES_FIELD.getPreferredName(), numberOfFailedFetches); - builder.field(OPERATIONS_RECEIVED_FIELD.getPreferredName(), operationsReceived); + TOTAL_READ_REMOTE_EXEC_TIME_MILLIS_FIELD.getPreferredName(), + "total_read_remote_exec_time", + new TimeValue(totalReadRemoteExecTimeMillis, TimeUnit.MILLISECONDS)); + builder.field(SUCCESSFUL_READ_REQUESTS_FIELD.getPreferredName(), successfulReadRequests); + builder.field(FAILED_READ_REQUESTS_FIELD.getPreferredName(), failedReadRequests); + builder.field(OPERATIONS_READ_FIELD.getPreferredName(), operationsReads); builder.humanReadableField( - TOTAL_TRANSFERRED_BYTES.getPreferredName(), - "total_transferred", - new ByteSizeValue(totalTransferredBytes, ByteSizeUnit.BYTES)); + BYTES_READ.getPreferredName(), + "total_read", + new ByteSizeValue(bytesRead, ByteSizeUnit.BYTES)); builder.humanReadableField( - 
TOTAL_INDEX_TIME_MILLIS_FIELD.getPreferredName(), - "total_index_time", - new TimeValue(totalIndexTimeMillis, TimeUnit.MILLISECONDS)); - builder.field(NUMBER_OF_SUCCESSFUL_BULK_OPERATIONS_FIELD.getPreferredName(), numberOfSuccessfulBulkOperations); - builder.field(NUMBER_OF_FAILED_BULK_OPERATIONS_FIELD.getPreferredName(), numberOfFailedBulkOperations); - builder.field(NUMBER_OF_OPERATIONS_INDEXED_FIELD.getPreferredName(), numberOfOperationsIndexed); - builder.startArray(FETCH_EXCEPTIONS.getPreferredName()); + TOTAL_WRITE_TIME_MILLIS_FIELD.getPreferredName(), + "total_write_time", + new TimeValue(totalWriteTimeMillis, TimeUnit.MILLISECONDS)); + builder.field(SUCCESSFUL_WRITE_REQUESTS_FIELD.getPreferredName(), successfulWriteRequests); + builder.field(FAILED_WRITE_REQUEST_FIELD.getPreferredName(), failedWriteRequests); + builder.field(OPERATIONS_WRITTEN.getPreferredName(), operationWritten); + builder.startArray(READ_EXCEPTIONS.getPreferredName()); { - for (final Map.Entry> entry : fetchExceptions.entrySet()) { + for (final Map.Entry> entry : readExceptions.entrySet()) { builder.startObject(); { - builder.field(FETCH_EXCEPTIONS_ENTRY_FROM_SEQ_NO.getPreferredName(), entry.getKey()); - builder.field(FETCH_EXCEPTIONS_RETRIES.getPreferredName(), entry.getValue().v1()); - builder.field(FETCH_EXCEPTIONS_ENTRY_EXCEPTION.getPreferredName()); + builder.field(READ_EXCEPTIONS_ENTRY_FROM_SEQ_NO.getPreferredName(), entry.getKey()); + builder.field(READ_EXCEPTIONS_RETRIES.getPreferredName(), entry.getValue().v1()); + builder.field(READ_EXCEPTIONS_ENTRY_EXCEPTION.getPreferredName()); builder.startObject(); { ElasticsearchException.generateThrowableXContent(builder, params, entry.getValue().v2()); @@ -494,9 +511,9 @@ public XContentBuilder toXContentFragment(final XContentBuilder builder, final P } builder.endArray(); builder.humanReadableField( - TIME_SINCE_LAST_FETCH_MILLIS_FIELD.getPreferredName(), - "time_since_last_fetch", - new TimeValue(timeSinceLastFetchMillis, 
TimeUnit.MILLISECONDS)); + TIME_SINCE_LAST_READ_MILLIS_FIELD.getPreferredName(), + "time_since_last_read", + new TimeValue(timeSinceLastReadMillis, TimeUnit.MILLISECONDS)); if (fatalException != null) { builder.field(FATAL_EXCEPTION.getPreferredName()); builder.startObject(); @@ -519,7 +536,7 @@ public boolean equals(final Object o) { final ShardFollowNodeTaskStatus that = (ShardFollowNodeTaskStatus) o; String fatalExceptionMessage = fatalException != null ? fatalException.getMessage() : null; String otherFatalExceptionMessage = that.fatalException != null ? that.fatalException.getMessage() : null; - return leaderCluster.equals(that.leaderCluster) && + return remoteCluster.equals(that.remoteCluster) && leaderIndex.equals(that.leaderIndex) && followerIndex.equals(that.followerIndex) && shardId == that.shardId && @@ -528,27 +545,28 @@ public boolean equals(final Object o) { followerGlobalCheckpoint == that.followerGlobalCheckpoint && followerMaxSeqNo == that.followerMaxSeqNo && lastRequestedSeqNo == that.lastRequestedSeqNo && - numberOfConcurrentReads == that.numberOfConcurrentReads && - numberOfConcurrentWrites == that.numberOfConcurrentWrites && - numberOfQueuedWrites == that.numberOfQueuedWrites && - mappingVersion == that.mappingVersion && - totalFetchTimeMillis == that.totalFetchTimeMillis && - totalFetchLeaderTimeMillis == that.totalFetchLeaderTimeMillis && - numberOfSuccessfulFetches == that.numberOfSuccessfulFetches && - numberOfFailedFetches == that.numberOfFailedFetches && - operationsReceived == that.operationsReceived && - totalTransferredBytes == that.totalTransferredBytes && - numberOfSuccessfulBulkOperations == that.numberOfSuccessfulBulkOperations && - numberOfFailedBulkOperations == that.numberOfFailedBulkOperations && - numberOfOperationsIndexed == that.numberOfOperationsIndexed && + outstandingReadRequests == that.outstandingReadRequests && + outstandingWriteRequests == that.outstandingWriteRequests && + writeBufferOperationCount == 
that.writeBufferOperationCount && + writeBufferSizeInBytes == that.writeBufferSizeInBytes && + followerMappingVersion == that.followerMappingVersion && + totalReadTimeMillis == that.totalReadTimeMillis && + totalReadRemoteExecTimeMillis == that.totalReadRemoteExecTimeMillis && + successfulReadRequests == that.successfulReadRequests && + failedReadRequests == that.failedReadRequests && + operationsReads == that.operationsReads && + bytesRead == that.bytesRead && + successfulWriteRequests == that.successfulWriteRequests && + failedWriteRequests == that.failedWriteRequests && + operationWritten == that.operationWritten && /* * ElasticsearchException does not implement equals so we will assume the fetch exceptions are equal if they are equal * up to the key set and their messages. Note that we are relying on the fact that the fetch exceptions are ordered by * keys. */ - fetchExceptions.keySet().equals(that.fetchExceptions.keySet()) && - getFetchExceptionMessages(this).equals(getFetchExceptionMessages(that)) && - timeSinceLastFetchMillis == that.timeSinceLastFetchMillis && + readExceptions.keySet().equals(that.readExceptions.keySet()) && + getReadExceptionMessages(this).equals(getReadExceptionMessages(that)) && + timeSinceLastReadMillis == that.timeSinceLastReadMillis && Objects.equals(fatalExceptionMessage, otherFatalExceptionMessage); } @@ -556,7 +574,7 @@ public boolean equals(final Object o) { public int hashCode() { String fatalExceptionMessage = fatalException != null ? 
fatalException.getMessage() : null; return Objects.hash( - leaderCluster, + remoteCluster, leaderIndex, followerIndex, shardId, @@ -565,31 +583,32 @@ public int hashCode() { followerGlobalCheckpoint, followerMaxSeqNo, lastRequestedSeqNo, - numberOfConcurrentReads, - numberOfConcurrentWrites, - numberOfQueuedWrites, - mappingVersion, - totalFetchTimeMillis, - totalFetchLeaderTimeMillis, - numberOfSuccessfulFetches, - numberOfFailedFetches, - operationsReceived, - totalTransferredBytes, - numberOfSuccessfulBulkOperations, - numberOfFailedBulkOperations, - numberOfOperationsIndexed, + outstandingReadRequests, + outstandingWriteRequests, + writeBufferOperationCount, + writeBufferSizeInBytes, + followerMappingVersion, + totalReadTimeMillis, + totalReadRemoteExecTimeMillis, + successfulReadRequests, + failedReadRequests, + operationsReads, + bytesRead, + successfulWriteRequests, + failedWriteRequests, + operationWritten, /* * ElasticsearchException does not implement hash code so we will compute the hash code based on the key set and the * messages. Note that we are relying on the fact that the fetch exceptions are ordered by keys. 
*/ - fetchExceptions.keySet(), - getFetchExceptionMessages(this), - timeSinceLastFetchMillis, + readExceptions.keySet(), + getReadExceptionMessages(this), + timeSinceLastReadMillis, fatalExceptionMessage); } - private static List getFetchExceptionMessages(final ShardFollowNodeTaskStatus status) { - return status.fetchExceptions().values().stream().map(t -> t.v2().getMessage()).collect(Collectors.toList()); + private static List getReadExceptionMessages(final ShardFollowNodeTaskStatus status) { + return status.readExceptions().values().stream().map(t -> t.v2().getMessage()).collect(Collectors.toList()); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PauseFollowAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PauseFollowAction.java index a5a45fea3f6b8..c20cda9ab4a4a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PauseFollowAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PauseFollowAction.java @@ -7,13 +7,14 @@ package org.elasticsearch.xpack.core.ccr.action; import org.elasticsearch.action.Action; -import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; +import java.util.Objects; public class PauseFollowAction extends Action { @@ -29,27 +30,26 @@ public AcknowledgedResponse newResponse() { return new AcknowledgedResponse(); } - public static class Request extends ActionRequest { + public static class Request extends MasterNodeRequest { - private String followIndex; + private final String followIndex; - public String getFollowIndex() { - return followIndex; + public 
Request(String followIndex) { + this.followIndex = Objects.requireNonNull(followIndex, "followIndex"); } - public void setFollowIndex(final String followIndex) { - this.followIndex = followIndex; + public Request(StreamInput in) throws IOException { + super(in); + this.followIndex = in.readString(); } - @Override - public ActionRequestValidationException validate() { - return null; + public String getFollowIndex() { + return followIndex; } @Override - public void readFrom(final StreamInput in) throws IOException { - super.readFrom(in); - followIndex = in.readString(); + public ActionRequestValidationException validate() { + return null; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java index 22c574d26d2ca..b2f966bba749a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java @@ -25,6 +25,7 @@ import java.util.Objects; import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.xpack.core.ccr.AutoFollowMetadata.AutoFollowPattern.REMOTE_CLUSTER_FIELD; public class PutAutoFollowPatternAction extends Action { @@ -42,32 +43,41 @@ public AcknowledgedResponse newResponse() { public static class Request extends AcknowledgedRequest implements ToXContentObject { - static final ParseField LEADER_CLUSTER_FIELD = new ParseField("leader_cluster"); - private static final ObjectParser PARSER = new ObjectParser<>("put_auto_follow_pattern_request", Request::new); private static final ParseField NAME_FIELD = new ParseField("name"); static { PARSER.declareString(Request::setName, NAME_FIELD); - PARSER.declareString(Request::setLeaderCluster, AutoFollowPattern.LEADER_CLUSTER_FIELD); + 
PARSER.declareString(Request::setRemoteCluster, REMOTE_CLUSTER_FIELD); PARSER.declareStringArray(Request::setLeaderIndexPatterns, AutoFollowPattern.LEADER_PATTERNS_FIELD); PARSER.declareString(Request::setFollowIndexNamePattern, AutoFollowPattern.FOLLOW_PATTERN_FIELD); - PARSER.declareInt(Request::setMaxBatchOperationCount, AutoFollowPattern.MAX_BATCH_OPERATION_COUNT); - PARSER.declareInt(Request::setMaxConcurrentReadBatches, AutoFollowPattern.MAX_CONCURRENT_READ_BATCHES); + PARSER.declareInt(Request::setMaxReadRequestOperationCount, AutoFollowPattern.MAX_READ_REQUEST_OPERATION_COUNT); PARSER.declareField( - Request::setMaxBatchSize, - (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), AutoFollowPattern.MAX_BATCH_SIZE.getPreferredName()), - AutoFollowPattern.MAX_BATCH_SIZE, + Request::setMaxReadRequestSize, + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), AutoFollowPattern.MAX_READ_REQUEST_SIZE.getPreferredName()), + AutoFollowPattern.MAX_READ_REQUEST_SIZE, ObjectParser.ValueType.STRING); - PARSER.declareInt(Request::setMaxConcurrentWriteBatches, AutoFollowPattern.MAX_CONCURRENT_WRITE_BATCHES); - PARSER.declareInt(Request::setMaxWriteBufferSize, AutoFollowPattern.MAX_WRITE_BUFFER_SIZE); + PARSER.declareInt(Request::setMaxConcurrentReadBatches, AutoFollowPattern.MAX_OUTSTANDING_READ_REQUESTS); + PARSER.declareInt(Request::setMaxWriteRequestOperationCount, AutoFollowPattern.MAX_WRITE_REQUEST_OPERATION_COUNT); + PARSER.declareField( + Request::setMaxWriteRequestSize, + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), AutoFollowPattern.MAX_WRITE_REQUEST_SIZE.getPreferredName()), + AutoFollowPattern.MAX_WRITE_REQUEST_SIZE, + ObjectParser.ValueType.STRING); + PARSER.declareInt(Request::setMaxConcurrentWriteBatches, AutoFollowPattern.MAX_OUTSTANDING_WRITE_REQUESTS); + PARSER.declareInt(Request::setMaxWriteBufferCount, AutoFollowPattern.MAX_WRITE_BUFFER_COUNT); + PARSER.declareField( + Request::setMaxWriteBufferSize, + (p, c) -> 
ByteSizeValue.parseBytesSizeValue(p.text(), AutoFollowPattern.MAX_WRITE_BUFFER_SIZE.getPreferredName()), + AutoFollowPattern.MAX_WRITE_BUFFER_SIZE, + ObjectParser.ValueType.STRING); PARSER.declareField(Request::setMaxRetryDelay, (p, c) -> TimeValue.parseTimeValue(p.text(), AutoFollowPattern.MAX_RETRY_DELAY.getPreferredName()), AutoFollowPattern.MAX_RETRY_DELAY, ObjectParser.ValueType.STRING); - PARSER.declareField(Request::setPollTimeout, - (p, c) -> TimeValue.parseTimeValue(p.text(), AutoFollowPattern.POLL_TIMEOUT.getPreferredName()), - AutoFollowPattern.POLL_TIMEOUT, ObjectParser.ValueType.STRING); + PARSER.declareField(Request::setReadPollTimeout, + (p, c) -> TimeValue.parseTimeValue(p.text(), AutoFollowPattern.READ_POLL_TIMEOUT.getPreferredName()), + AutoFollowPattern.READ_POLL_TIMEOUT, ObjectParser.ValueType.STRING); } public static Request fromXContent(XContentParser parser, String name) throws IOException { @@ -85,17 +95,20 @@ public static Request fromXContent(XContentParser parser, String name) throws IO } private String name; - private String leaderCluster; + private String remoteCluster; private List leaderIndexPatterns; private String followIndexNamePattern; - private Integer maxBatchOperationCount; + private Integer maxReadRequestOperationCount; + private ByteSizeValue maxReadRequestSize; private Integer maxConcurrentReadBatches; - private ByteSizeValue maxBatchSize; + private Integer maxWriteRequestOperationCount; + private ByteSizeValue maxWriteRequestSize; private Integer maxConcurrentWriteBatches; - private Integer maxWriteBufferSize; + private Integer maxWriteBufferCount; + private ByteSizeValue maxWriteBufferSize; private TimeValue maxRetryDelay; - private TimeValue pollTimeout; + private TimeValue readPollTimeout; @Override public ActionRequestValidationException validate() { @@ -103,8 +116,8 @@ public ActionRequestValidationException validate() { if (name == null) { validationException = addValidationError("[" + NAME_FIELD.getPreferredName() + 
"] is missing", validationException); } - if (leaderCluster == null) { - validationException = addValidationError("[" + AutoFollowPattern.LEADER_CLUSTER_FIELD.getPreferredName() + + if (remoteCluster == null) { + validationException = addValidationError("[" + REMOTE_CLUSTER_FIELD.getPreferredName() + "] is missing", validationException); } if (leaderIndexPatterns == null || leaderIndexPatterns.isEmpty()) { @@ -135,12 +148,12 @@ public void setName(String name) { this.name = name; } - public String getLeaderCluster() { - return leaderCluster; + public String getRemoteCluster() { + return remoteCluster; } - public void setLeaderCluster(String leaderCluster) { - this.leaderCluster = leaderCluster; + public void setRemoteCluster(String remoteCluster) { + this.remoteCluster = remoteCluster; } public List getLeaderIndexPatterns() { @@ -159,12 +172,12 @@ public void setFollowIndexNamePattern(String followIndexNamePattern) { this.followIndexNamePattern = followIndexNamePattern; } - public Integer getMaxBatchOperationCount() { - return maxBatchOperationCount; + public Integer getMaxReadRequestOperationCount() { + return maxReadRequestOperationCount; } - public void setMaxBatchOperationCount(Integer maxBatchOperationCount) { - this.maxBatchOperationCount = maxBatchOperationCount; + public void setMaxReadRequestOperationCount(Integer maxReadRequestOperationCount) { + this.maxReadRequestOperationCount = maxReadRequestOperationCount; } public Integer getMaxConcurrentReadBatches() { @@ -175,12 +188,28 @@ public void setMaxConcurrentReadBatches(Integer maxConcurrentReadBatches) { this.maxConcurrentReadBatches = maxConcurrentReadBatches; } - public ByteSizeValue getMaxBatchSize() { - return maxBatchSize; + public ByteSizeValue getMaxReadRequestSize() { + return maxReadRequestSize; + } + + public void setMaxReadRequestSize(ByteSizeValue maxReadRequestSize) { + this.maxReadRequestSize = maxReadRequestSize; + } + + public Integer getMaxWriteRequestOperationCount() { + return 
maxWriteRequestOperationCount; + } + + public void setMaxWriteRequestOperationCount(Integer maxWriteRequestOperationCount) { + this.maxWriteRequestOperationCount = maxWriteRequestOperationCount; } - public void setMaxBatchSize(ByteSizeValue maxBatchSize) { - this.maxBatchSize = maxBatchSize; + public ByteSizeValue getMaxWriteRequestSize() { + return maxWriteRequestSize; + } + + public void setMaxWriteRequestSize(ByteSizeValue maxWriteRequestSize) { + this.maxWriteRequestSize = maxWriteRequestSize; } public Integer getMaxConcurrentWriteBatches() { @@ -191,11 +220,19 @@ public void setMaxConcurrentWriteBatches(Integer maxConcurrentWriteBatches) { this.maxConcurrentWriteBatches = maxConcurrentWriteBatches; } - public Integer getMaxWriteBufferSize() { + public Integer getMaxWriteBufferCount() { + return maxWriteBufferCount; + } + + public void setMaxWriteBufferCount(Integer maxWriteBufferCount) { + this.maxWriteBufferCount = maxWriteBufferCount; + } + + public ByteSizeValue getMaxWriteBufferSize() { return maxWriteBufferSize; } - public void setMaxWriteBufferSize(Integer maxWriteBufferSize) { + public void setMaxWriteBufferSize(ByteSizeValue maxWriteBufferSize) { this.maxWriteBufferSize = maxWriteBufferSize; } @@ -207,44 +244,50 @@ public void setMaxRetryDelay(TimeValue maxRetryDelay) { this.maxRetryDelay = maxRetryDelay; } - public TimeValue getPollTimeout() { - return pollTimeout; + public TimeValue getReadPollTimeout() { + return readPollTimeout; } - public void setPollTimeout(TimeValue pollTimeout) { - this.pollTimeout = pollTimeout; + public void setReadPollTimeout(TimeValue readPollTimeout) { + this.readPollTimeout = readPollTimeout; } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); name = in.readString(); - leaderCluster = in.readString(); + remoteCluster = in.readString(); leaderIndexPatterns = in.readList(StreamInput::readString); followIndexNamePattern = in.readOptionalString(); - maxBatchOperationCount = 
in.readOptionalVInt(); + maxReadRequestOperationCount = in.readOptionalVInt(); + maxReadRequestSize = in.readOptionalWriteable(ByteSizeValue::new); maxConcurrentReadBatches = in.readOptionalVInt(); - maxBatchSize = in.readOptionalWriteable(ByteSizeValue::new); + maxWriteRequestOperationCount = in.readOptionalVInt(); + maxWriteRequestSize = in.readOptionalWriteable(ByteSizeValue::new); maxConcurrentWriteBatches = in.readOptionalVInt(); - maxWriteBufferSize = in.readOptionalVInt(); + maxWriteBufferCount = in.readOptionalVInt(); + maxWriteBufferSize = in.readOptionalWriteable(ByteSizeValue::new); maxRetryDelay = in.readOptionalTimeValue(); - pollTimeout = in.readOptionalTimeValue(); + readPollTimeout = in.readOptionalTimeValue(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(name); - out.writeString(leaderCluster); + out.writeString(remoteCluster); out.writeStringList(leaderIndexPatterns); out.writeOptionalString(followIndexNamePattern); - out.writeOptionalVInt(maxBatchOperationCount); + out.writeOptionalVInt(maxReadRequestOperationCount); + out.writeOptionalWriteable(maxReadRequestSize); out.writeOptionalVInt(maxConcurrentReadBatches); - out.writeOptionalWriteable(maxBatchSize); + out.writeOptionalVInt(maxWriteRequestOperationCount); + out.writeOptionalWriteable(maxWriteRequestSize); out.writeOptionalVInt(maxConcurrentWriteBatches); - out.writeOptionalVInt(maxWriteBufferSize); + out.writeOptionalVInt(maxWriteBufferCount); + out.writeOptionalWriteable(maxWriteBufferSize); out.writeOptionalTimeValue(maxRetryDelay); - out.writeOptionalTimeValue(pollTimeout); + out.writeOptionalTimeValue(readPollTimeout); } @Override @@ -252,31 +295,40 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); { builder.field(NAME_FIELD.getPreferredName(), name); - builder.field(AutoFollowPattern.LEADER_CLUSTER_FIELD.getPreferredName(), leaderCluster); + 
builder.field(REMOTE_CLUSTER_FIELD.getPreferredName(), remoteCluster); builder.field(AutoFollowPattern.LEADER_PATTERNS_FIELD.getPreferredName(), leaderIndexPatterns); if (followIndexNamePattern != null) { builder.field(AutoFollowPattern.FOLLOW_PATTERN_FIELD.getPreferredName(), followIndexNamePattern); } - if (maxBatchOperationCount != null) { - builder.field(AutoFollowPattern.MAX_BATCH_OPERATION_COUNT.getPreferredName(), maxBatchOperationCount); + if (maxReadRequestOperationCount != null) { + builder.field(AutoFollowPattern.MAX_READ_REQUEST_OPERATION_COUNT.getPreferredName(), maxReadRequestOperationCount); + } + if (maxReadRequestSize != null) { + builder.field(AutoFollowPattern.MAX_READ_REQUEST_SIZE.getPreferredName(), maxReadRequestSize.getStringRep()); + } + if (maxWriteRequestOperationCount != null) { + builder.field(AutoFollowPattern.MAX_WRITE_REQUEST_OPERATION_COUNT.getPreferredName(), maxWriteRequestOperationCount); + } + if (maxWriteRequestSize != null) { + builder.field(AutoFollowPattern.MAX_WRITE_REQUEST_SIZE.getPreferredName(), maxWriteRequestSize.getStringRep()); } - if (maxBatchSize != null) { - builder.field(AutoFollowPattern.MAX_BATCH_SIZE.getPreferredName(), maxBatchSize.getStringRep()); + if (maxWriteBufferCount != null) { + builder.field(AutoFollowPattern.MAX_WRITE_BUFFER_COUNT.getPreferredName(), maxWriteBufferCount); } if (maxWriteBufferSize != null) { - builder.field(AutoFollowPattern.MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize); + builder.field(AutoFollowPattern.MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize.getStringRep()); } if (maxConcurrentReadBatches != null) { - builder.field(AutoFollowPattern.MAX_CONCURRENT_READ_BATCHES.getPreferredName(), maxConcurrentReadBatches); + builder.field(AutoFollowPattern.MAX_OUTSTANDING_READ_REQUESTS.getPreferredName(), maxConcurrentReadBatches); } if (maxConcurrentWriteBatches != null) { - builder.field(AutoFollowPattern.MAX_CONCURRENT_WRITE_BATCHES.getPreferredName(), 
maxConcurrentWriteBatches); + builder.field(AutoFollowPattern.MAX_OUTSTANDING_WRITE_REQUESTS.getPreferredName(), maxConcurrentWriteBatches); } if (maxRetryDelay != null) { builder.field(AutoFollowPattern.MAX_RETRY_DELAY.getPreferredName(), maxRetryDelay.getStringRep()); } - if (pollTimeout != null) { - builder.field(AutoFollowPattern.POLL_TIMEOUT.getPreferredName(), pollTimeout.getStringRep()); + if (readPollTimeout != null) { + builder.field(AutoFollowPattern.READ_POLL_TIMEOUT.getPreferredName(), readPollTimeout.getStringRep()); } } builder.endObject(); @@ -289,32 +341,38 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; Request request = (Request) o; return Objects.equals(name, request.name) && - Objects.equals(leaderCluster, request.leaderCluster) && + Objects.equals(remoteCluster, request.remoteCluster) && Objects.equals(leaderIndexPatterns, request.leaderIndexPatterns) && Objects.equals(followIndexNamePattern, request.followIndexNamePattern) && - Objects.equals(maxBatchOperationCount, request.maxBatchOperationCount) && + Objects.equals(maxReadRequestOperationCount, request.maxReadRequestOperationCount) && + Objects.equals(maxReadRequestSize, request.maxReadRequestSize) && Objects.equals(maxConcurrentReadBatches, request.maxConcurrentReadBatches) && - Objects.equals(maxBatchSize, request.maxBatchSize) && + Objects.equals(maxWriteRequestOperationCount, request.maxWriteRequestOperationCount) && + Objects.equals(maxWriteRequestSize, request.maxWriteRequestSize) && Objects.equals(maxConcurrentWriteBatches, request.maxConcurrentWriteBatches) && + Objects.equals(maxWriteBufferCount, request.maxWriteBufferCount) && Objects.equals(maxWriteBufferSize, request.maxWriteBufferSize) && Objects.equals(maxRetryDelay, request.maxRetryDelay) && - Objects.equals(pollTimeout, request.pollTimeout); + Objects.equals(readPollTimeout, request.readPollTimeout); } @Override public int hashCode() { return Objects.hash( name, - leaderCluster, 
+ remoteCluster, leaderIndexPatterns, followIndexNamePattern, - maxBatchOperationCount, + maxReadRequestOperationCount, + maxReadRequestSize, maxConcurrentReadBatches, - maxBatchSize, + maxWriteRequestOperationCount, + maxWriteRequestSize, maxConcurrentWriteBatches, + maxWriteBufferCount, maxWriteBufferSize, maxRetryDelay, - pollTimeout); + readPollTimeout); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java index 5fdb13871b56c..b242b8cc8ec4c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java @@ -12,14 +12,30 @@ import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; import java.util.Objects; +import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction.Request.FOLLOWER_INDEX_FIELD; +import static org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction.Request.MAX_READ_REQUEST_OPERATION_COUNT; +import static org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction.Request.MAX_READ_REQUEST_SIZE; +import static 
org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction.Request.MAX_OUTSTANDING_READ_REQUESTS; +import static org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction.Request.MAX_OUTSTANDING_WRITE_REQUESTS; +import static org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction.Request.MAX_RETRY_DELAY_FIELD; +import static org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction.Request.MAX_WRITE_BUFFER_COUNT; +import static org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction.Request.MAX_WRITE_BUFFER_SIZE; +import static org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction.Request.READ_POLL_TIMEOUT; + public final class PutFollowAction extends Action { public static final PutFollowAction INSTANCE = new PutFollowAction(); @@ -34,25 +50,102 @@ public Response newResponse() { return new Response(); } - public static class Request extends AcknowledgedRequest implements IndicesRequest { + public static class Request extends AcknowledgedRequest implements IndicesRequest, ToXContentObject { + + private static final ParseField REMOTE_CLUSTER_FIELD = new ParseField("remote_cluster"); + private static final ParseField LEADER_INDEX_FIELD = new ParseField("leader_index"); + + private static final ObjectParser PARSER = new ObjectParser<>(NAME, () -> { + Request request = new Request(); + request.setFollowRequest(new ResumeFollowAction.Request()); + return request; + }); + + static { + PARSER.declareString(Request::setRemoteCluster, REMOTE_CLUSTER_FIELD); + PARSER.declareString(Request::setLeaderIndex, LEADER_INDEX_FIELD); + PARSER.declareString((req, val) -> req.followRequest.setFollowerIndex(val), FOLLOWER_INDEX_FIELD); + PARSER.declareInt((req, val) -> req.followRequest.setMaxReadRequestOperationCount(val), MAX_READ_REQUEST_OPERATION_COUNT); + PARSER.declareInt((req, val) -> req.followRequest.setMaxOutstandingReadRequests(val), MAX_OUTSTANDING_READ_REQUESTS); + PARSER.declareField( + (req, val) -> req.followRequest.setMaxReadRequestSize(val), + (p, c) -> 
ByteSizeValue.parseBytesSizeValue(p.text(), MAX_READ_REQUEST_SIZE.getPreferredName()), + MAX_READ_REQUEST_SIZE, + ObjectParser.ValueType.STRING); + PARSER.declareInt((req, val) -> req.followRequest.setMaxOutstandingWriteRequests(val), MAX_OUTSTANDING_WRITE_REQUESTS); + PARSER.declareInt((req, val) -> req.followRequest.setMaxWriteBufferCount(val), MAX_WRITE_BUFFER_COUNT); + PARSER.declareField( + (req, val) -> req.followRequest.setMaxWriteBufferSize(val), + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_WRITE_BUFFER_SIZE.getPreferredName()), + MAX_WRITE_BUFFER_SIZE, + ObjectParser.ValueType.STRING); + PARSER.declareField( + (req, val) -> req.followRequest.setMaxRetryDelay(val), + (p, c) -> TimeValue.parseTimeValue(p.text(), MAX_RETRY_DELAY_FIELD.getPreferredName()), + MAX_RETRY_DELAY_FIELD, + ObjectParser.ValueType.STRING); + PARSER.declareField( + (req, val) -> req.followRequest.setReadPollTimeout(val), + (p, c) -> TimeValue.parseTimeValue(p.text(), READ_POLL_TIMEOUT.getPreferredName()), + READ_POLL_TIMEOUT, + ObjectParser.ValueType.STRING); + } + public static Request fromXContent(final XContentParser parser, final String followerIndex) throws IOException { + Request request = PARSER.parse(parser, followerIndex); + if (followerIndex != null) { + if (request.getFollowRequest().getFollowerIndex() == null) { + request.getFollowRequest().setFollowerIndex(followerIndex); + } else { + if (request.getFollowRequest().getFollowerIndex().equals(followerIndex) == false) { + throw new IllegalArgumentException("provided follower_index is not equal"); + } + } + } + return request; + } + + private String remoteCluster; + private String leaderIndex; private ResumeFollowAction.Request followRequest; - public Request(ResumeFollowAction.Request followRequest) { - this.followRequest = Objects.requireNonNull(followRequest); + public Request() { } - public Request() { + public String getRemoteCluster() { + return remoteCluster; + } + + public void setRemoteCluster(String 
remoteCluster) { + this.remoteCluster = remoteCluster; + } + public String getLeaderIndex() { + return leaderIndex; + } + + public void setLeaderIndex(String leaderIndex) { + this.leaderIndex = leaderIndex; } public ResumeFollowAction.Request getFollowRequest() { return followRequest; } + public void setFollowRequest(ResumeFollowAction.Request followRequest) { + this.followRequest = followRequest; + } + @Override public ActionRequestValidationException validate() { - return followRequest.validate(); + ActionRequestValidationException e = followRequest.validate(); + if (remoteCluster == null) { + e = addValidationError(REMOTE_CLUSTER_FIELD.getPreferredName() + " is missing", e); + } + if (leaderIndex == null) { + e = addValidationError(LEADER_INDEX_FIELD.getPreferredName() + " is missing", e); + } + return e; } @Override @@ -68,6 +161,8 @@ public IndicesOptions indicesOptions() { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); + remoteCluster = in.readString(); + leaderIndex = in.readString(); followRequest = new ResumeFollowAction.Request(); followRequest.readFrom(in); } @@ -75,20 +170,36 @@ public void readFrom(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); + out.writeString(remoteCluster); + out.writeString(leaderIndex); followRequest.writeTo(out); } + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field(REMOTE_CLUSTER_FIELD.getPreferredName(), remoteCluster); + builder.field(LEADER_INDEX_FIELD.getPreferredName(), leaderIndex); + followRequest.toXContentFragment(builder, params); + } + builder.endObject(); + return builder; + } + @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Request request = (Request) o; - return Objects.equals(followRequest, 
request.followRequest); + return Objects.equals(remoteCluster, request.remoteCluster) && + Objects.equals(leaderIndex, request.leaderIndex) && + Objects.equals(followRequest, request.followRequest); } @Override public int hashCode() { - return Objects.hash(followRequest); + return Objects.hash(remoteCluster, leaderIndex, followRequest); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ResumeFollowAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ResumeFollowAction.java index 02ed7a1a5fb40..11c46492cc0f0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ResumeFollowAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ResumeFollowAction.java @@ -43,40 +43,47 @@ public AcknowledgedResponse newResponse() { public static class Request extends ActionRequest implements ToXContentObject { - private static final ParseField LEADER_CLUSTER_FIELD = new ParseField("leader_cluster"); - private static final ParseField LEADER_INDEX_FIELD = new ParseField("leader_index"); - private static final ParseField FOLLOWER_INDEX_FIELD = new ParseField("follower_index"); - private static final ParseField MAX_BATCH_OPERATION_COUNT = new ParseField("max_batch_operation_count"); - private static final ParseField MAX_CONCURRENT_READ_BATCHES = new ParseField("max_concurrent_read_batches"); - private static final ParseField MAX_BATCH_SIZE = new ParseField("max_batch_size"); - private static final ParseField MAX_CONCURRENT_WRITE_BATCHES = new ParseField("max_concurrent_write_batches"); - private static final ParseField MAX_WRITE_BUFFER_SIZE = new ParseField("max_write_buffer_size"); - private static final ParseField MAX_RETRY_DELAY_FIELD = new ParseField("max_retry_delay"); - private static final ParseField POLL_TIMEOUT = new ParseField("poll_timeout"); - private static final ObjectParser PARSER = new ObjectParser<>(NAME, Request::new); + static 
final ParseField FOLLOWER_INDEX_FIELD = new ParseField("follower_index"); + static final ParseField MAX_READ_REQUEST_OPERATION_COUNT = new ParseField("max_read_request_operation_count"); + static final ParseField MAX_READ_REQUEST_SIZE = new ParseField("max_read_request_size"); + static final ParseField MAX_OUTSTANDING_READ_REQUESTS = new ParseField("max_outstanding_read_requests"); + static final ParseField MAX_WRITE_REQUEST_OPERATION_COUNT = new ParseField("max_write_request_operation_count"); + static final ParseField MAX_WRITE_REQUEST_SIZE = new ParseField("max_write_request_size"); + static final ParseField MAX_OUTSTANDING_WRITE_REQUESTS = new ParseField("max_outstanding_write_requests"); + static final ParseField MAX_WRITE_BUFFER_COUNT = new ParseField("max_write_buffer_count"); + static final ParseField MAX_WRITE_BUFFER_SIZE = new ParseField("max_write_buffer_size"); + static final ParseField MAX_RETRY_DELAY_FIELD = new ParseField("max_retry_delay"); + static final ParseField READ_POLL_TIMEOUT = new ParseField("read_poll_timeout"); + static final ObjectParser PARSER = new ObjectParser<>(NAME, Request::new); static { - PARSER.declareString(Request::setLeaderCluster, LEADER_CLUSTER_FIELD); - PARSER.declareString(Request::setLeaderIndex, LEADER_INDEX_FIELD); PARSER.declareString(Request::setFollowerIndex, FOLLOWER_INDEX_FIELD); - PARSER.declareInt(Request::setMaxBatchOperationCount, MAX_BATCH_OPERATION_COUNT); - PARSER.declareInt(Request::setMaxConcurrentReadBatches, MAX_CONCURRENT_READ_BATCHES); + PARSER.declareInt(Request::setMaxReadRequestOperationCount, MAX_READ_REQUEST_OPERATION_COUNT); PARSER.declareField( - Request::setMaxBatchSize, - (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_BATCH_SIZE.getPreferredName()), - MAX_BATCH_SIZE, + Request::setMaxReadRequestSize, + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_READ_REQUEST_SIZE.getPreferredName()), MAX_READ_REQUEST_SIZE, + ObjectParser.ValueType.STRING); + 
PARSER.declareInt(Request::setMaxOutstandingReadRequests, MAX_OUTSTANDING_READ_REQUESTS); + PARSER.declareInt(Request::setMaxWriteRequestOperationCount, MAX_WRITE_REQUEST_OPERATION_COUNT); + PARSER.declareField(Request::setMaxWriteRequestSize, + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_WRITE_REQUEST_SIZE.getPreferredName()), MAX_WRITE_REQUEST_SIZE, + ObjectParser.ValueType.STRING); + PARSER.declareInt(Request::setMaxOutstandingWriteRequests, MAX_OUTSTANDING_WRITE_REQUESTS); + PARSER.declareInt(Request::setMaxWriteBufferCount, MAX_WRITE_BUFFER_COUNT); + PARSER.declareField( + Request::setMaxWriteBufferSize, + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_WRITE_BUFFER_SIZE.getPreferredName()), + MAX_WRITE_BUFFER_SIZE, ObjectParser.ValueType.STRING); - PARSER.declareInt(Request::setMaxConcurrentWriteBatches, MAX_CONCURRENT_WRITE_BATCHES); - PARSER.declareInt(Request::setMaxWriteBufferSize, MAX_WRITE_BUFFER_SIZE); PARSER.declareField( Request::setMaxRetryDelay, (p, c) -> TimeValue.parseTimeValue(p.text(), MAX_RETRY_DELAY_FIELD.getPreferredName()), - MAX_RETRY_DELAY_FIELD, + MAX_RETRY_DELAY_FIELD, ObjectParser.ValueType.STRING); PARSER.declareField( - Request::setPollTimeout, - (p, c) -> TimeValue.parseTimeValue(p.text(), POLL_TIMEOUT.getPreferredName()), - POLL_TIMEOUT, + Request::setReadPollTimeout, + (p, c) -> TimeValue.parseTimeValue(p.text(), READ_POLL_TIMEOUT.getPreferredName()), + READ_POLL_TIMEOUT, ObjectParser.ValueType.STRING); } @@ -94,83 +101,93 @@ public static Request fromXContent(final XContentParser parser, final String fol return request; } - private String leaderCluster; + private String followerIndex; - public String getLeaderCluster() { - return leaderCluster; + public String getFollowerIndex() { + return followerIndex; } - public void setLeaderCluster(String leaderCluster) { - this.leaderCluster = leaderCluster; + public void setFollowerIndex(String followerIndex) { + this.followerIndex = followerIndex; } - private 
String leaderIndex; + private Integer maxReadRequestOperationCount; - public String getLeaderIndex() { - return leaderIndex; + public Integer getMaxReadRequestOperationCount() { + return maxReadRequestOperationCount; } - public void setLeaderIndex(String leaderIndex) { - this.leaderIndex = leaderIndex; + public void setMaxReadRequestOperationCount(Integer maxReadRequestOperationCount) { + this.maxReadRequestOperationCount = maxReadRequestOperationCount; } - private String followerIndex; + private Integer maxOutstandingReadRequests; - public String getFollowerIndex() { - return followerIndex; + public Integer getMaxOutstandingReadRequests() { + return maxOutstandingReadRequests; } - public void setFollowerIndex(String followerIndex) { - this.followerIndex = followerIndex; + public void setMaxOutstandingReadRequests(Integer maxOutstandingReadRequests) { + this.maxOutstandingReadRequests = maxOutstandingReadRequests; + } + + private ByteSizeValue maxReadRequestSize; + + public ByteSizeValue getMaxReadRequestSize() { + return maxReadRequestSize; + } + + public void setMaxReadRequestSize(ByteSizeValue maxReadRequestSize) { + this.maxReadRequestSize = maxReadRequestSize; } - private Integer maxBatchOperationCount; + private Integer maxWriteRequestOperationCount; - public Integer getMaxBatchOperationCount() { - return maxBatchOperationCount; + public Integer getMaxWriteRequestOperationCount() { + return maxWriteRequestOperationCount; } - public void setMaxBatchOperationCount(Integer maxBatchOperationCount) { - this.maxBatchOperationCount = maxBatchOperationCount; + public void setMaxWriteRequestOperationCount(Integer maxWriteRequestOperationCount) { + this.maxWriteRequestOperationCount = maxWriteRequestOperationCount; } - private Integer maxConcurrentReadBatches; + private ByteSizeValue maxWriteRequestSize; - public Integer getMaxConcurrentReadBatches() { - return maxConcurrentReadBatches; + public ByteSizeValue getMaxWriteRequestSize() { + return maxWriteRequestSize; } - 
public void setMaxConcurrentReadBatches(Integer maxConcurrentReadBatches) { - this.maxConcurrentReadBatches = maxConcurrentReadBatches; + public void setMaxWriteRequestSize(ByteSizeValue maxWriteRequestSize) { + this.maxWriteRequestSize = maxWriteRequestSize; } - private ByteSizeValue maxBatchSize; + private Integer maxOutstandingWriteRequests; - public ByteSizeValue getMaxBatchSize() { - return maxBatchSize; + public Integer getMaxOutstandingWriteRequests() { + return maxOutstandingWriteRequests; } - public void setMaxBatchSize(ByteSizeValue maxBatchSize) { - this.maxBatchSize = maxBatchSize; + public void setMaxOutstandingWriteRequests(Integer maxOutstandingWriteRequests) { + this.maxOutstandingWriteRequests = maxOutstandingWriteRequests; } - private Integer maxConcurrentWriteBatches; + private Integer maxWriteBufferCount; - public Integer getMaxConcurrentWriteBatches() { - return maxConcurrentWriteBatches; + public Integer getMaxWriteBufferCount() { + return maxWriteBufferCount; } - public void setMaxConcurrentWriteBatches(Integer maxConcurrentWriteBatches) { - this.maxConcurrentWriteBatches = maxConcurrentWriteBatches; + public void setMaxWriteBufferCount(Integer maxWriteBufferCount) { + this.maxWriteBufferCount = maxWriteBufferCount; } - private Integer maxWriteBufferSize; + private ByteSizeValue maxWriteBufferSize; - public Integer getMaxWriteBufferSize() { + public ByteSizeValue getMaxWriteBufferSize() { return maxWriteBufferSize; } - public void setMaxWriteBufferSize(Integer maxWriteBufferSize) { + public void setMaxWriteBufferSize(ByteSizeValue maxWriteBufferSize) { this.maxWriteBufferSize = maxWriteBufferSize; } @@ -184,14 +201,14 @@ public TimeValue getMaxRetryDelay() { return maxRetryDelay; } - private TimeValue pollTimeout; + private TimeValue readPollTimeout; - public TimeValue getPollTimeout() { - return pollTimeout; + public TimeValue getReadPollTimeout() { + return readPollTimeout; } - public void setPollTimeout(TimeValue pollTimeout) { - 
this.pollTimeout = pollTimeout; + public void setReadPollTimeout(TimeValue readPollTimeout) { + this.readPollTimeout = readPollTimeout; } public Request() { @@ -201,28 +218,31 @@ public Request() { public ActionRequestValidationException validate() { ActionRequestValidationException e = null; - if (leaderCluster == null) { - e = addValidationError(LEADER_CLUSTER_FIELD.getPreferredName() + " is missing", e); - } - if (leaderIndex == null) { - e = addValidationError(LEADER_INDEX_FIELD.getPreferredName() + " is missing", e); - } if (followerIndex == null) { e = addValidationError(FOLLOWER_INDEX_FIELD.getPreferredName() + " is missing", e); } - if (maxBatchOperationCount != null && maxBatchOperationCount < 1) { - e = addValidationError(MAX_BATCH_OPERATION_COUNT.getPreferredName() + " must be larger than 0", e); + if (maxReadRequestOperationCount != null && maxReadRequestOperationCount < 1) { + e = addValidationError(MAX_READ_REQUEST_OPERATION_COUNT.getPreferredName() + " must be larger than 0", e); + } + if (maxReadRequestSize != null && maxReadRequestSize.compareTo(ByteSizeValue.ZERO) <= 0) { + e = addValidationError(MAX_READ_REQUEST_SIZE.getPreferredName() + " must be larger than 0", e); + } + if (maxOutstandingReadRequests != null && maxOutstandingReadRequests < 1) { + e = addValidationError(MAX_OUTSTANDING_READ_REQUESTS.getPreferredName() + " must be larger than 0", e); } - if (maxConcurrentReadBatches != null && maxConcurrentReadBatches < 1) { - e = addValidationError(MAX_CONCURRENT_READ_BATCHES.getPreferredName() + " must be larger than 0", e); + if (maxWriteRequestOperationCount != null && maxWriteRequestOperationCount < 1) { + e = addValidationError(MAX_WRITE_REQUEST_OPERATION_COUNT.getPreferredName() + " must be larger than 0", e); } - if (maxBatchSize != null && maxBatchSize.compareTo(ByteSizeValue.ZERO) <= 0) { - e = addValidationError(MAX_BATCH_SIZE.getPreferredName() + " must be larger than 0", e); + if (maxWriteRequestSize != null && 
maxWriteRequestSize.compareTo(ByteSizeValue.ZERO) <= 0) { + e = addValidationError(MAX_WRITE_REQUEST_SIZE.getPreferredName() + " must be larger than 0", e); } - if (maxConcurrentWriteBatches != null && maxConcurrentWriteBatches < 1) { - e = addValidationError(MAX_CONCURRENT_WRITE_BATCHES.getPreferredName() + " must be larger than 0", e); + if (maxOutstandingWriteRequests != null && maxOutstandingWriteRequests < 1) { + e = addValidationError(MAX_OUTSTANDING_WRITE_REQUESTS.getPreferredName() + " must be larger than 0", e); } - if (maxWriteBufferSize != null && maxWriteBufferSize < 1) { + if (maxWriteBufferCount != null && maxWriteBufferCount < 1) { + e = addValidationError(MAX_WRITE_BUFFER_COUNT.getPreferredName() + " must be larger than 0", e); + } + if (maxWriteBufferSize != null && maxWriteBufferSize.compareTo(ByteSizeValue.ZERO) <= 0) { e = addValidationError(MAX_WRITE_BUFFER_SIZE.getPreferredName() + " must be larger than 0", e); } if (maxRetryDelay != null && maxRetryDelay.millis() <= 0) { @@ -242,96 +262,107 @@ public ActionRequestValidationException validate() { @Override public void readFrom(final StreamInput in) throws IOException { super.readFrom(in); - leaderCluster = in.readString(); - leaderIndex = in.readString(); followerIndex = in.readString(); - maxBatchOperationCount = in.readOptionalVInt(); - maxConcurrentReadBatches = in.readOptionalVInt(); - maxBatchSize = in.readOptionalWriteable(ByteSizeValue::new); - maxConcurrentWriteBatches = in.readOptionalVInt(); - maxWriteBufferSize = in.readOptionalVInt(); + maxReadRequestOperationCount = in.readOptionalVInt(); + maxOutstandingReadRequests = in.readOptionalVInt(); + maxReadRequestSize = in.readOptionalWriteable(ByteSizeValue::new); + maxOutstandingWriteRequests = in.readOptionalVInt(); + maxWriteBufferCount = in.readOptionalVInt(); + maxWriteBufferSize = in.readOptionalWriteable(ByteSizeValue::new); maxRetryDelay = in.readOptionalTimeValue(); - pollTimeout = in.readOptionalTimeValue(); + readPollTimeout 
= in.readOptionalTimeValue(); } @Override public void writeTo(final StreamOutput out) throws IOException { super.writeTo(out); - out.writeString(leaderCluster); - out.writeString(leaderIndex); out.writeString(followerIndex); - out.writeOptionalVInt(maxBatchOperationCount); - out.writeOptionalVInt(maxConcurrentReadBatches); - out.writeOptionalWriteable(maxBatchSize); - out.writeOptionalVInt(maxConcurrentWriteBatches); - out.writeOptionalVInt(maxWriteBufferSize); + out.writeOptionalVInt(maxReadRequestOperationCount); + out.writeOptionalVInt(maxOutstandingReadRequests); + out.writeOptionalWriteable(maxReadRequestSize); + out.writeOptionalVInt(maxOutstandingWriteRequests); + out.writeOptionalVInt(maxWriteBufferCount); + out.writeOptionalWriteable(maxWriteBufferSize); out.writeOptionalTimeValue(maxRetryDelay); - out.writeOptionalTimeValue(pollTimeout); + out.writeOptionalTimeValue(readPollTimeout); } @Override public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { builder.startObject(); { - builder.field(LEADER_CLUSTER_FIELD.getPreferredName(), leaderCluster); - builder.field(LEADER_INDEX_FIELD.getPreferredName(), leaderIndex); - builder.field(FOLLOWER_INDEX_FIELD.getPreferredName(), followerIndex); - if (maxBatchOperationCount != null) { - builder.field(MAX_BATCH_OPERATION_COUNT.getPreferredName(), maxBatchOperationCount); - } - if (maxBatchSize != null) { - builder.field(MAX_BATCH_SIZE.getPreferredName(), maxBatchSize.getStringRep()); - } - if (maxWriteBufferSize != null) { - builder.field(MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize); - } - if (maxConcurrentReadBatches != null) { - builder.field(MAX_CONCURRENT_READ_BATCHES.getPreferredName(), maxConcurrentReadBatches); - } - if (maxConcurrentWriteBatches != null) { - builder.field(MAX_CONCURRENT_WRITE_BATCHES.getPreferredName(), maxConcurrentWriteBatches); - } - if (maxRetryDelay != null) { - builder.field(MAX_RETRY_DELAY_FIELD.getPreferredName(), 
maxRetryDelay.getStringRep()); - } - if (pollTimeout != null) { - builder.field(POLL_TIMEOUT.getPreferredName(), pollTimeout.getStringRep()); - } + toXContentFragment(builder, params); } builder.endObject(); return builder; } + void toXContentFragment(final XContentBuilder builder, final Params params) throws IOException { + builder.field(FOLLOWER_INDEX_FIELD.getPreferredName(), followerIndex); + if (maxReadRequestOperationCount != null) { + builder.field(MAX_READ_REQUEST_OPERATION_COUNT.getPreferredName(), maxReadRequestOperationCount); + } + if (maxReadRequestSize != null) { + builder.field(MAX_READ_REQUEST_SIZE.getPreferredName(), maxReadRequestSize.getStringRep()); + } + if (maxWriteRequestOperationCount != null) { + builder.field(MAX_WRITE_REQUEST_OPERATION_COUNT.getPreferredName(), maxWriteRequestOperationCount); + } + if (maxWriteRequestSize != null) { + builder.field(MAX_WRITE_REQUEST_SIZE.getPreferredName(), maxWriteRequestSize.getStringRep()); + } + if (maxWriteBufferCount != null) { + builder.field(MAX_WRITE_BUFFER_COUNT.getPreferredName(), maxWriteBufferCount); + } + if (maxWriteBufferSize != null) { + builder.field(MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize.getStringRep()); + } + if (maxOutstandingReadRequests != null) { + builder.field(MAX_OUTSTANDING_READ_REQUESTS.getPreferredName(), maxOutstandingReadRequests); + } + if (maxOutstandingWriteRequests != null) { + builder.field(MAX_OUTSTANDING_WRITE_REQUESTS.getPreferredName(), maxOutstandingWriteRequests); + } + if (maxRetryDelay != null) { + builder.field(MAX_RETRY_DELAY_FIELD.getPreferredName(), maxRetryDelay.getStringRep()); + } + if (readPollTimeout != null) { + builder.field(READ_POLL_TIMEOUT.getPreferredName(), readPollTimeout.getStringRep()); + } + } + @Override public boolean equals(final Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Request request = (Request) o; - return Objects.equals(maxBatchOperationCount, 
request.maxBatchOperationCount) && - Objects.equals(maxConcurrentReadBatches, request.maxConcurrentReadBatches) && - Objects.equals(maxBatchSize, request.maxBatchSize) && - Objects.equals(maxConcurrentWriteBatches, request.maxConcurrentWriteBatches) && + return Objects.equals(maxReadRequestOperationCount, request.maxReadRequestOperationCount) && + Objects.equals(maxReadRequestSize, request.maxReadRequestSize) && + Objects.equals(maxOutstandingReadRequests, request.maxOutstandingReadRequests) && + Objects.equals(maxWriteRequestOperationCount, request.maxWriteRequestOperationCount) && + Objects.equals(maxWriteRequestSize, request.maxWriteRequestSize) && + Objects.equals(maxOutstandingWriteRequests, request.maxOutstandingWriteRequests) && + Objects.equals(maxWriteBufferCount, request.maxWriteBufferCount) && Objects.equals(maxWriteBufferSize, request.maxWriteBufferSize) && Objects.equals(maxRetryDelay, request.maxRetryDelay) && - Objects.equals(pollTimeout, request.pollTimeout) && - Objects.equals(leaderCluster, request.leaderCluster) && - Objects.equals(leaderIndex, request.leaderIndex) && + Objects.equals(readPollTimeout, request.readPollTimeout) && Objects.equals(followerIndex, request.followerIndex); } @Override public int hashCode() { return Objects.hash( - leaderCluster, - leaderIndex, followerIndex, - maxBatchOperationCount, - maxConcurrentReadBatches, - maxBatchSize, - maxConcurrentWriteBatches, + maxReadRequestOperationCount, + maxReadRequestSize, + maxOutstandingReadRequests, + maxWriteRequestOperationCount, + maxWriteRequestSize, + maxOutstandingWriteRequests, + maxWriteBufferCount, maxWriteBufferSize, maxRetryDelay, - pollTimeout); + readPollTimeout); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobConfig.java index 172a6fe617b33..51a4736e3adee 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobConfig.java @@ -20,19 +20,14 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder; import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; -import java.util.stream.Collectors; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; @@ -51,7 +46,6 @@ public class RollupJobConfig implements NamedWriteable, ToXContentObject { private static final String PAGE_SIZE = "page_size"; private static final String INDEX_PATTERN = "index_pattern"; private static final String ROLLUP_INDEX = "rollup_index"; - private static final List DEFAULT_HISTO_METRICS = Arrays.asList(MaxAggregationBuilder.NAME, MinAggregationBuilder.NAME); private final String id; private final String indexPattern; @@ -129,7 +123,7 @@ public RollupJobConfig(final String id, this.indexPattern = indexPattern; this.rollupIndex = rollupIndex; this.groupConfig = groupConfig; - this.metricsConfig = addDefaultMetricsIfNeeded(metricsConfig, groupConfig); + this.metricsConfig = metricsConfig != null ? metricsConfig : Collections.emptyList(); this.timeout = timeout != null ? 
timeout : DEFAULT_TIMEOUT; this.cron = cron; this.pageSize = pageSize; @@ -283,23 +277,4 @@ public String toJSONString() { public static RollupJobConfig fromXContent(final XContentParser parser, @Nullable final String optionalJobId) throws IOException { return PARSER.parse(parser, optionalJobId); } - - private static List addDefaultMetricsIfNeeded(List metrics, GroupConfig groupConfig) { - List inputMetrics = metrics != null ? new ArrayList<>(metrics) : new ArrayList<>(); - if (groupConfig != null) { - String timeField = groupConfig.getDateHistogram().getField(); - Set currentFields = inputMetrics.stream().map(MetricConfig::getField).collect(Collectors.toSet()); - if (currentFields.contains(timeField) == false) { - inputMetrics.add(new MetricConfig(timeField, DEFAULT_HISTO_METRICS)); - } - if (groupConfig.getHistogram() != null) { - for (String histoField : groupConfig.getHistogram().getFields()) { - if (currentFields.contains(histoField) == false) { - inputMetrics.add(new MetricConfig(histoField, DEFAULT_HISTO_METRICS)); - } - } - } - } - return Collections.unmodifiableList(inputMetrics); - } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/esnative/ClientReservedRealm.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/esnative/ClientReservedRealm.java index 5a228133073e3..81b98e34996e4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/esnative/ClientReservedRealm.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/esnative/ClientReservedRealm.java @@ -20,6 +20,7 @@ public static boolean isReserved(String username, Settings settings) { case UsernamesField.LOGSTASH_NAME: case UsernamesField.BEATS_NAME: case UsernamesField.APM_NAME: + case UsernamesField.REMOTE_MONITORING_NAME: return XPackSettings.RESERVED_REALM_ENABLED_SETTING.get(settings); default: return AnonymousUser.isAnonymousUsername(username, settings); 
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java index 3999e9ad3d094..24b17976f4c9e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java @@ -72,8 +72,25 @@ private static Map initializeReservedRoles() { "cluster:admin/xpack/watcher/watch/delete", }, new RoleDescriptor.IndicesPrivileges[] { - RoleDescriptor.IndicesPrivileges.builder().indices(".monitoring-*").privileges("all").build() }, + RoleDescriptor.IndicesPrivileges.builder().indices(".monitoring-*").privileges("all").build(), + RoleDescriptor.IndicesPrivileges.builder() + .indices("metricbeat-*").privileges("index", "create_index").build() }, null, MetadataUtils.DEFAULT_RESERVED_METADATA)) + .put("remote_monitoring_collector", new RoleDescriptor( + "remote_monitoring_collector", + new String[] { + "monitor" + }, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("monitor").build(), + RoleDescriptor.IndicesPrivileges.builder().indices(".kibana*").privileges("read").build() + }, + null, + null, + null, + MetadataUtils.DEFAULT_RESERVED_METADATA, + null + )) .put("ingest_admin", new RoleDescriptor("ingest_admin", new String[] { "manage_index_templates", "manage_pipeline" }, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA)) // reporting_user doesn't have any privileges in Elasticsearch, and Kibana authorizes privileges based on this role diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/netty4/SecurityNetty4Transport.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/netty4/SecurityNetty4Transport.java index 
36b480c29c7fd..e76302aebb058 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/netty4/SecurityNetty4Transport.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/netty4/SecurityNetty4Transport.java @@ -157,11 +157,12 @@ public SslChannelInitializer(String name, SSLConfiguration configuration) { @Override protected void initChannel(Channel ch) throws Exception { - super.initChannel(ch); SSLEngine serverEngine = sslService.createSSLEngine(configuration, null, -1); serverEngine.setUseClientMode(false); final SslHandler sslHandler = new SslHandler(serverEngine); ch.pipeline().addFirst("sslhandler", sslHandler); + super.initChannel(ch); + assert ch.pipeline().first() == sslHandler : "SSL handler must be first handler in pipeline"; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/RemoteMonitoringUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/RemoteMonitoringUser.java new file mode 100644 index 0000000000000..ad51c575d72d9 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/RemoteMonitoringUser.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.user; + +import org.elasticsearch.Version; +import org.elasticsearch.xpack.core.security.support.MetadataUtils; + +/** + * Built in user for remote monitoring: collection as well as indexing. 
+ */ +public class RemoteMonitoringUser extends User { + + public static final String NAME = UsernamesField.REMOTE_MONITORING_NAME; + public static final String COLLECTION_ROLE_NAME = UsernamesField.REMOTE_MONITORING_COLLECTION_ROLE; + public static final String INDEXING_ROLE_NAME = UsernamesField.REMOTE_MONITORING_INDEXING_ROLE; + + public static final Version DEFINED_SINCE = Version.V_6_5_0; + + public RemoteMonitoringUser(boolean enabled) { + super(NAME, new String[]{ COLLECTION_ROLE_NAME, INDEXING_ROLE_NAME }, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, enabled); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/UsernamesField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/UsernamesField.java index bd886567ed1b2..0a593ad992820 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/UsernamesField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/UsernamesField.java @@ -23,5 +23,9 @@ public final class UsernamesField { public static final String APM_NAME = "apm_system"; public static final String APM_ROLE = "apm_system"; + public static final String REMOTE_MONITORING_NAME = "remote_monitoring_user"; + public static final String REMOTE_MONITORING_COLLECTION_ROLE = "remote_monitoring_collector"; + public static final String REMOTE_MONITORING_INDEXING_ROLE = "remote_monitoring_agent"; + private UsernamesField() {} } diff --git a/x-pack/plugin/core/src/main/resources/monitoring-es.json b/x-pack/plugin/core/src/main/resources/monitoring-es.json index 791a0ea02c392..bdd16d3b58cc6 100644 --- a/x-pack/plugin/core/src/main/resources/monitoring-es.json +++ b/x-pack/plugin/core/src/main/resources/monitoring-es.json @@ -929,7 +929,7 @@ }, "ccr_stats": { "properties": { - "leader_cluster": { + "remote_cluster": { "type": "keyword" }, "leader_index": { @@ -956,49 +956,52 @@ "last_requested_seq_no": { "type": "long" }, 
- "number_of_concurrent_reads": { + "outstanding_read_requests": { "type": "long" }, - "number_of_concurrent_writes": { + "outstanding_write_requests": { "type": "long" }, - "number_of_queued_writes": { + "write_buffer_operation_count": { "type": "long" }, - "mapping_version": { + "write_buffer_size_in_bytes": { "type": "long" }, - "total_fetch_time_millis": { + "follower_mapping_version": { "type": "long" }, - "total_fetch_leader_time_millis": { + "total_read_time_millis": { "type": "long" }, - "number_of_successful_fetches": { + "total_read_remote_exec_time_millis": { "type": "long" }, - "number_of_failed_fetches": { + "successful_read_requests": { "type": "long" }, - "operations_received": { + "failed_read_requests": { "type": "long" }, - "total_transferred_bytes": { + "operations_read": { "type": "long" }, - "total_index_time_millis": { + "bytes_read": { "type": "long" }, - "number_of_successful_bulk_operations": { + "total_write_time_millis": { "type": "long" }, - "number_of_failed_bulk_operations": { + "successful_write_requests": { "type": "long" }, - "number_of_operations_indexed": { + "failed_write_requests": { "type": "long" }, - "fetch_exceptions": { + "operations_written": { + "type": "long" + }, + "read_exceptions": { "type": "nested", "properties": { "from_seq_no": { @@ -1020,7 +1023,7 @@ } } }, - "time_since_last_fetch_millis": { + "time_since_last_read_millis": { "type": "long" } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/monitoring/test/MockPainlessScriptEngine.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/monitoring/test/MockPainlessScriptEngine.java index 5aeeb47db7e77..2052cebe1d0e9 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/monitoring/test/MockPainlessScriptEngine.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/monitoring/test/MockPainlessScriptEngine.java @@ -8,9 +8,9 @@ import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.script.MockScriptEngine; import org.elasticsearch.script.MockScriptPlugin; +import org.elasticsearch.script.ScoreScript; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptEngine; -import org.elasticsearch.script.SearchScript; import java.util.Collection; import java.util.Collections; @@ -43,9 +43,8 @@ public String getType() { @Override public T compile(String name, String script, ScriptContext context, Map options) { - MockCompiledScript compiledScript = new MockCompiledScript(name, options, script, p -> script); - if (context.instanceClazz.equals(SearchScript.class)) { - return context.factoryClazz.cast((SearchScript.Factory) compiledScript::createSearchScript); + if (context.instanceClazz.equals(ScoreScript.class)) { + return context.factoryClazz.cast(new MockScoreScript(p -> 0.0)); } throw new IllegalArgumentException("mock painless does not know how to handle context [" + context.name + "]"); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobConfigTests.java index fa9009af018a2..09d00e11fef92 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobConfigTests.java @@ -11,23 +11,10 @@ import org.junit.Before; import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Random; -import java.util.Set; -import java.util.stream.Collectors; - -import static com.carrotsearch.randomizedtesting.generators.RandomStrings.randomAsciiAlphanumOfLengthBetween; + import static java.util.Collections.emptyList; -import static org.elasticsearch.xpack.core.rollup.ConfigTestHelpers.randomCron; -import static 
org.elasticsearch.xpack.core.rollup.ConfigTestHelpers.randomDateHistogramGroupConfig; -import static org.elasticsearch.xpack.core.rollup.ConfigTestHelpers.randomHistogramGroupConfig; -import static org.elasticsearch.xpack.core.rollup.ConfigTestHelpers.randomMetricsConfigs; import static org.elasticsearch.xpack.core.rollup.ConfigTestHelpers.randomRollupJobConfig; -import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.isIn; public class RollupJobConfigTests extends AbstractSerializingTestCase { @@ -176,69 +163,4 @@ public void testEmptyGroupAndMetrics() { null, emptyList(), sample.getTimeout())); assertThat(e.getMessage(), equalTo("At least one grouping or metric must be configured")); } - - public void testDefaultFieldsForDateHistograms() { - final Random random = random(); - DateHistogramGroupConfig dateHistogramGroupConfig = randomDateHistogramGroupConfig(random); - HistogramGroupConfig histogramGroupConfig1 = randomHistogramGroupConfig(random); - List metrics = new ArrayList<>(randomMetricsConfigs(random)); - for (String histoField : histogramGroupConfig1.getFields()) { - metrics.add(new MetricConfig(histoField, Arrays.asList("max"))); - } - GroupConfig groupConfig = new GroupConfig(dateHistogramGroupConfig, histogramGroupConfig1, null); - RollupJobConfig rollupJobConfig = new RollupJobConfig( - randomAsciiAlphanumOfLengthBetween(random, 1, 20), - "indexes_*", - "rollup_" + randomAsciiAlphanumOfLengthBetween(random, 1, 20), - randomCron(), - randomIntBetween(1, 10), - groupConfig, - metrics, - null); - Set metricFields = rollupJobConfig.getMetricsConfig().stream().map(MetricConfig::getField).collect(Collectors.toSet()); - assertThat(dateHistogramGroupConfig.getField(), isIn(metricFields)); - List histoFields = Arrays.asList(histogramGroupConfig1.getFields()); - rollupJobConfig.getMetricsConfig().forEach(metricConfig -> { - if (histoFields.contains(metricConfig.getField())) { - // 
Since it is explicitly included, the defaults should not be added - assertThat(metricConfig.getMetrics(), containsInAnyOrder("max")); - } - if (metricConfig.getField().equals(dateHistogramGroupConfig.getField())) { - assertThat(metricConfig.getMetrics(), containsInAnyOrder("max", "min")); - } - }); - } - - public void testDefaultFieldsForHistograms() { - final Random random = random(); - DateHistogramGroupConfig dateHistogramGroupConfig = randomDateHistogramGroupConfig(random); - HistogramGroupConfig histogramGroupConfig1 = randomHistogramGroupConfig(random); - List metrics = new ArrayList<>(randomMetricsConfigs(random)); - metrics.add(new MetricConfig(dateHistogramGroupConfig.getField(), Arrays.asList("max"))); - GroupConfig groupConfig = new GroupConfig(dateHistogramGroupConfig, histogramGroupConfig1, null); - RollupJobConfig rollupJobConfig = new RollupJobConfig( - randomAsciiAlphanumOfLengthBetween(random, 1, 20), - "indexes_*", - "rollup_" + randomAsciiAlphanumOfLengthBetween(random, 1, 20), - randomCron(), - randomIntBetween(1, 10), - groupConfig, - metrics, - null); - Set metricFields = rollupJobConfig.getMetricsConfig().stream().map(MetricConfig::getField).collect(Collectors.toSet()); - for (String histoField : histogramGroupConfig1.getFields()) { - assertThat(histoField, isIn(metricFields)); - } - assertThat(dateHistogramGroupConfig.getField(), isIn(metricFields)); - List histoFields = Arrays.asList(histogramGroupConfig1.getFields()); - rollupJobConfig.getMetricsConfig().forEach(metricConfig -> { - if (histoFields.contains(metricConfig.getField())) { - assertThat(metricConfig.getMetrics(), containsInAnyOrder("max", "min")); - } - if (metricConfig.getField().equals(dateHistogramGroupConfig.getField())) { - // Since it is explicitly included, the defaults should not be added - assertThat(metricConfig.getMetrics(), containsInAnyOrder("max")); - } - }); - } } diff --git 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index 9972fc7b74bcf..bdb8c09d48424 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexAction; import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; import org.elasticsearch.action.admin.indices.get.GetIndexAction; +import org.elasticsearch.action.admin.indices.recovery.RecoveryAction; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesAction; @@ -97,6 +98,7 @@ import org.elasticsearch.xpack.core.security.user.APMSystemUser; import org.elasticsearch.xpack.core.security.user.BeatsSystemUser; import org.elasticsearch.xpack.core.security.user.LogstashSystemUser; +import org.elasticsearch.xpack.core.security.user.RemoteMonitoringUser; import org.elasticsearch.xpack.core.security.user.SystemUser; import org.elasticsearch.xpack.core.security.user.XPackUser; import org.elasticsearch.xpack.core.watcher.execution.TriggeredWatchStoreField; @@ -136,7 +138,6 @@ public void testIsReserved() { assertThat(ReservedRolesStore.isReserved("transport_client"), is(true)); assertThat(ReservedRolesStore.isReserved("kibana_user"), is(true)); assertThat(ReservedRolesStore.isReserved("ingest_admin"), is(true)); - assertThat(ReservedRolesStore.isReserved("remote_monitoring_agent"), is(true)); assertThat(ReservedRolesStore.isReserved("monitoring_user"), is(true)); 
assertThat(ReservedRolesStore.isReserved("reporting_user"), is(true)); assertThat(ReservedRolesStore.isReserved("machine_learning_user"), is(true)); @@ -149,6 +150,8 @@ public void testIsReserved() { assertThat(ReservedRolesStore.isReserved(LogstashSystemUser.ROLE_NAME), is(true)); assertThat(ReservedRolesStore.isReserved(BeatsSystemUser.ROLE_NAME), is(true)); assertThat(ReservedRolesStore.isReserved(APMSystemUser.ROLE_NAME), is(true)); + assertThat(ReservedRolesStore.isReserved(RemoteMonitoringUser.COLLECTION_ROLE_NAME), is(true)); + assertThat(ReservedRolesStore.isReserved(RemoteMonitoringUser.INDEXING_ROLE_NAME), is(true)); } public void testIngestAdminRole() { @@ -399,17 +402,75 @@ public void testRemoteMonitoringAgentRole() { assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher("indices:foo") .test(randomAlphaOfLengthBetween(8, 24)), is(false)); - final String index = ".monitoring-" + randomAlphaOfLength(randomIntBetween(0, 13)); - assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher("indices:foo").test(index), is(true)); - assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher("indices:bar").test(index), is(true)); - assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(DeleteIndexAction.NAME).test(index), is(true)); - assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(index), is(true)); - assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(index), is(true)); - assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(index), is(true)); - assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(index), is(true)); - assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(index), is(true)); - assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(GetAction.NAME).test(index), is(true)); - 
assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(GetIndexAction.NAME).test(index), is(true)); + final String monitoringIndex = ".monitoring-" + randomAlphaOfLength(randomIntBetween(0, 13)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher("indices:foo").test(monitoringIndex), is(true)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher("indices:bar").test(monitoringIndex), is(true)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(DeleteIndexAction.NAME).test(monitoringIndex), is(true)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(monitoringIndex), is(true)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(monitoringIndex), is(true)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(monitoringIndex), is(true)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(monitoringIndex), is(true)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(monitoringIndex), is(true)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(GetAction.NAME).test(monitoringIndex), is(true)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(GetIndexAction.NAME).test(monitoringIndex), is(true)); + + final String metricbeatIndex = "metricbeat-" + randomAlphaOfLength(randomIntBetween(0, 13)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher("indices:foo").test(metricbeatIndex), is(false)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher("indices:bar").test(metricbeatIndex), is(false)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(DeleteIndexAction.NAME).test(metricbeatIndex), is(false)); + 
assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(metricbeatIndex), is(true)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(metricbeatIndex), is(true)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(metricbeatIndex), is(false)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(metricbeatIndex), is(false)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(metricbeatIndex), is(false)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(GetAction.NAME).test(metricbeatIndex), is(false)); + + } + + public void testRemoteMonitoringCollectorRole() { + final TransportRequest request = mock(TransportRequest.class); + + RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("remote_monitoring_collector"); + assertNotNull(roleDescriptor); + assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); + + Role remoteMonitoringAgentRole = Role.builder(roleDescriptor, null).build(); + assertThat(remoteMonitoringAgentRole.cluster().check(ClusterHealthAction.NAME, request), is(true)); + assertThat(remoteMonitoringAgentRole.cluster().check(ClusterStateAction.NAME, request), is(true)); + assertThat(remoteMonitoringAgentRole.cluster().check(ClusterStatsAction.NAME, request), is(true)); + assertThat(remoteMonitoringAgentRole.cluster().check(GetIndexTemplatesAction.NAME, request), is(false)); + assertThat(remoteMonitoringAgentRole.cluster().check(PutIndexTemplateAction.NAME, request), is(false)); + assertThat(remoteMonitoringAgentRole.cluster().check(DeleteIndexTemplateAction.NAME, request), is(false)); + assertThat(remoteMonitoringAgentRole.cluster().check(ClusterRerouteAction.NAME, request), is(false)); + assertThat(remoteMonitoringAgentRole.cluster().check(ClusterUpdateSettingsAction.NAME, 
request), is(false)); + assertThat(remoteMonitoringAgentRole.cluster().check(MonitoringBulkAction.NAME, request), is(false)); + + assertThat(remoteMonitoringAgentRole.runAs().check(randomAlphaOfLengthBetween(1, 12)), is(false)); + + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(RecoveryAction.NAME).test("foo"), is(true)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(SearchAction.NAME).test("foo"), is(false)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(".reporting"), is(false)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(".kibana"), is(true)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(GetAction.NAME).test(".kibana"), is(true)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher("indices:foo") + .test(randomAlphaOfLengthBetween(8, 24)), is(false)); + + Arrays.asList( + ".monitoring-" + randomAlphaOfLength(randomIntBetween(0, 13)), + "metricbeat-" + randomAlphaOfLength(randomIntBetween(0, 13)) + ).forEach((index) -> { + logger.info("index name [{}]", index); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher("indices:foo").test(index), is(false)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher("indices:bar").test(index), is(false)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(DeleteIndexAction.NAME).test(index), is(false)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(index), is(false)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(index), is(false)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(index), is(false)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(index), is(false)); + 
assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(index), is(false)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(GetAction.NAME).test(index), is(false)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(GetIndexAction.NAME).test(index), is(false)); + }); } public void testReportingUserRole() { diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/ClusterDeprecationChecks.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/ClusterDeprecationChecks.java new file mode 100644 index 0000000000000..7f11c2c2944a7 --- /dev/null +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/ClusterDeprecationChecks.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.deprecation; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.xpack.core.deprecation.DeprecationIssue; + +public class ClusterDeprecationChecks { + + static DeprecationIssue checkShardLimit(ClusterState state) { + int shardsPerNode = MetaData.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.get(state.metaData().settings()); + int nodeCount = state.getNodes().getDataNodes().size(); + int maxShardsInCluster = shardsPerNode * nodeCount; + int currentOpenShards = state.getMetaData().getTotalOpenIndexShards(); + + if (currentOpenShards >= maxShardsInCluster) { + return new DeprecationIssue(DeprecationIssue.Level.WARNING, + "Number of open shards exceeds cluster soft limit", + "https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_70_cluster_changes.html", + "There are [" + currentOpenShards + "] open shards in this cluster, but the cluster is limited to [" + + shardsPerNode + "] per data node, for [" + maxShardsInCluster + "] maximum."); + } + return null; + } +} diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java index 83b72d47838ad..97c0498c3f603 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java @@ -29,7 +29,7 @@ private DeprecationChecks() { static List> CLUSTER_SETTINGS_CHECKS = Collections.unmodifiableList(Arrays.asList( - // STUB + ClusterDeprecationChecks::checkShardLimit )); static List, List, DeprecationIssue>> NODE_SETTINGS_CHECKS = diff --git a/x-pack/plugin/ml/build.gradle b/x-pack/plugin/ml/build.gradle index 8dd5e61bbc463..f3a6dc8b7a49f 100644 --- a/x-pack/plugin/ml/build.gradle +++ 
b/x-pack/plugin/ml/build.gradle @@ -1,5 +1,4 @@ import com.carrotsearch.gradle.junit4.RandomizedTestingTask -import org.elasticsearch.gradle.BuildPlugin evaluationDependsOn(xpackModule('core')) @@ -99,7 +98,6 @@ task internalClusterTest(type: RandomizedTestingTask, group: JavaBasePlugin.VERIFICATION_GROUP, description: 'Multi-node tests', dependsOn: test.dependsOn) { - configure(BuildPlugin.commonTestConfig(project)) classpath = project.test.classpath testClassesDirs = project.test.testClassesDirs include '**/*IT.class' diff --git a/x-pack/plugin/ml/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java b/x-pack/plugin/ml/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java index 34d58ef08bf50..bc847e1a07d58 100644 --- a/x-pack/plugin/ml/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java +++ b/x-pack/plugin/ml/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java @@ -176,11 +176,6 @@ static class TestConfiguration { tests.add(new TestConfiguration(null, "shishi.xn--fiqs8s","shishi.xn--fiqs8s")); } - public void testEmptyToLetBuildPass() { - // remove this once one of the awaitsfixes are removed!! 
- } - - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/34683") public void testIsolated() throws Exception { Settings.Builder settings = Settings.builder() .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) @@ -200,7 +195,7 @@ public void testIsolated() throws Exception { String mapAsJson = Strings.toString(jsonBuilder().map(params)); logger.info("params={}", mapAsJson); - Request searchRequest = new Request("GET", "/painless/test/_search"); + Request searchRequest = new Request("GET", "/painless/_search"); searchRequest.setJsonEntity( "{\n" + " \"query\" : {\n" + @@ -210,7 +205,7 @@ public void testIsolated() throws Exception { " \"domain_split\" : {\n" + " \"script\" : {\n" + " \"lang\": \"painless\",\n" + - " \"inline\": \"" + + " \"source\": \"" + " return domainSplit(params['host']); \",\n" + " \"params\": " + mapAsJson + "\n" + " }\n" + diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningPainlessExtension.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningPainlessExtension.java index b55936acd06e1..12d2626db74a0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningPainlessExtension.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningPainlessExtension.java @@ -8,8 +8,8 @@ import org.elasticsearch.painless.spi.PainlessExtension; import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.painless.spi.WhitelistLoader; +import org.elasticsearch.script.FieldScript; import org.elasticsearch.script.ScriptContext; -import org.elasticsearch.script.SearchScript; import java.util.Collections; import java.util.List; @@ -21,6 +21,6 @@ public class MachineLearningPainlessExtension implements PainlessExtension { @Override public Map, List> getContextWhitelists() { - return Collections.singletonMap(SearchScript.CONTEXT, Collections.singletonList(WHITELIST)); + return 
Collections.singletonMap(FieldScript.CONTEXT, Collections.singletonList(WHITELIST)); } } diff --git a/x-pack/plugin/monitoring/build.gradle b/x-pack/plugin/monitoring/build.gradle index e551d577b7bbd..54df68e769c39 100644 --- a/x-pack/plugin/monitoring/build.gradle +++ b/x-pack/plugin/monitoring/build.gradle @@ -1,5 +1,4 @@ import com.carrotsearch.gradle.junit4.RandomizedTestingTask -import org.elasticsearch.gradle.BuildPlugin evaluationDependsOn(xpackModule('core')) @@ -61,7 +60,6 @@ task internalClusterTest(type: RandomizedTestingTask, group: JavaBasePlugin.VERIFICATION_GROUP, description: 'Multi-node tests', dependsOn: test.dependsOn) { - configure(BuildPlugin.commonTestConfig(project)) classpath = project.test.classpath testClassesDirs = project.test.testClassesDirs include '**/*IT.class' diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java index 576f37d78440e..849461f1b6202 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java @@ -231,7 +231,7 @@ public void testVerifyMissingNormal() { MultiSearchResponse.Item missing = new MultiSearchResponse.Item(null, new IndexNotFoundException("foo")); Exception e = expectThrows(RuntimeException.class, () -> RollupResponseTranslator.verifyResponse(missing)); - assertThat(e.getMessage(), equalTo("no such index")); + assertThat(e.getMessage(), equalTo("no such index [foo]")); } public void testTranslateRollup() { @@ -287,7 +287,7 @@ public void testTranslateMissingRollup() { Exception e = expectThrows(RuntimeException.class, () -> RollupResponseTranslator.translateResponse(new MultiSearchResponse.Item[]{missing}, context)); - assertThat(e.getMessage(), equalTo("no such index")); + 
assertThat(e.getMessage(), equalTo("no such index [foo]")); } public void testMissingFilter() { diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java index 4784e75d64673..f33c1d4e008ba 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java @@ -116,8 +116,6 @@ public void testSimpleDateHisto() throws Exception { "the_histo.date_histogram.interval", "1ms", "the_histo.date_histogram._count", 2, "the_histo.date_histogram.time_zone", DateTimeZone.UTC.toString(), - "the_histo.min.value", 3.0, - "the_histo.max.value", 3.0, "_rollup.id", job.getId() ) )); @@ -131,8 +129,6 @@ public void testSimpleDateHisto() throws Exception { "the_histo.date_histogram.interval", "1ms", "the_histo.date_histogram._count", 1, "the_histo.date_histogram.time_zone", DateTimeZone.UTC.toString(), - "the_histo.min.value", 7.0, - "the_histo.max.value", 7.0, "_rollup.id", job.getId() ) )); @@ -183,8 +179,6 @@ public void testDateHistoAndMetrics() throws Exception { "counter.max.value", 20.0, "counter.sum.value", 50.0, "the_histo.date_histogram.time_zone", DateTimeZone.UTC.toString(), - "the_histo.min.value", (double) asLong("2015-03-31T03:00:00"), - "the_histo.max.value", (double) asLong("2015-03-31T03:40:00"), "_rollup.id", job.getId() ) )); @@ -203,8 +197,6 @@ public void testDateHistoAndMetrics() throws Exception { "counter.max.value", 55.0, "counter.sum.value", 141.0, "the_histo.date_histogram.time_zone", DateTimeZone.UTC.toString(), - "the_histo.min.value", (double) asLong("2015-03-31T04:00:00"), - "the_histo.max.value", (double) asLong("2015-03-31T04:40:00"), "_rollup.id", job.getId() ) )); @@ -223,8 +215,6 @@ public void testDateHistoAndMetrics() throws Exception { 
"counter.max.value", 80.0, "counter.sum.value", 275.0, "the_histo.date_histogram.time_zone", DateTimeZone.UTC.toString(), - "the_histo.min.value", (double) asLong("2015-03-31T05:00:00"), - "the_histo.max.value", (double) asLong("2015-03-31T05:40:00"), "_rollup.id", job.getId() ) )); @@ -243,8 +233,6 @@ public void testDateHistoAndMetrics() throws Exception { "counter.max.value", 100.0, "counter.sum.value", 270.0, "the_histo.date_histogram.time_zone", DateTimeZone.UTC.toString(), - "the_histo.min.value", (double) asLong("2015-03-31T06:00:00"), - "the_histo.max.value", (double) asLong("2015-03-31T06:40:00"), "_rollup.id", job.getId() ) )); @@ -263,8 +251,6 @@ public void testDateHistoAndMetrics() throws Exception { "counter.max.value", 200.0, "counter.sum.value", 440.0, "the_histo.date_histogram.time_zone", DateTimeZone.UTC.toString(), - "the_histo.min.value", (double) asLong("2015-03-31T07:00:00"), - "the_histo.max.value", (double) asLong("2015-03-31T07:40:00"), "_rollup.id", job.getId() ) )); @@ -306,8 +292,6 @@ public void testSimpleDateHistoWithDelay() throws Exception { "the_histo.date_histogram.interval", "1m", "the_histo.date_histogram._count", 2, "the_histo.date_histogram.time_zone", DateTimeZone.UTC.toString(), - "the_histo.min.value", (double) (now - TimeValue.timeValueHours(5).getMillis()), - "the_histo.max.value", (double) (now - TimeValue.timeValueHours(5).getMillis()), "_rollup.id", job.getId() ) )); @@ -321,8 +305,6 @@ public void testSimpleDateHistoWithDelay() throws Exception { "the_histo.date_histogram.interval", "1m", "the_histo.date_histogram._count", 2, "the_histo.date_histogram.time_zone", DateTimeZone.UTC.toString(), - "the_histo.min.value", (double) (now - TimeValue.timeValueMinutes(75).getMillis()), - "the_histo.max.value", (double) (now - TimeValue.timeValueMinutes(75).getMillis()), "_rollup.id", job.getId() ) )); @@ -336,8 +318,6 @@ public void testSimpleDateHistoWithDelay() throws Exception { "the_histo.date_histogram.interval", "1m", 
"the_histo.date_histogram._count", 1, "the_histo.date_histogram.time_zone", DateTimeZone.UTC.toString(), - "the_histo.min.value", (double) (now - TimeValue.timeValueMinutes(61).getMillis()), - "the_histo.max.value", (double) (now - TimeValue.timeValueMinutes(61).getMillis()), "_rollup.id", job.getId() ) )); @@ -377,8 +357,6 @@ public void testSimpleDateHistoWithTimeZone() throws Exception { "the_histo.date_histogram.interval", "1d", "the_histo.date_histogram._count", 2, "the_histo.date_histogram.time_zone", timeZone.toString(), - "the_histo.min.value", (double) (now - TimeValue.timeValueHours(10).getMillis()), - "the_histo.max.value", (double) (now - TimeValue.timeValueHours(8).getMillis()), "_rollup.id", job.getId() ) )); @@ -398,8 +376,6 @@ public void testSimpleDateHistoWithTimeZone() throws Exception { "the_histo.date_histogram.interval", "1d", "the_histo.date_histogram._count", 2, "the_histo.date_histogram.time_zone", timeZone.toString(), - "the_histo.min.value", (double) (now - TimeValue.timeValueHours(10).getMillis()), - "the_histo.max.value", (double) (now - TimeValue.timeValueHours(8).getMillis()), "_rollup.id", job.getId() ) )); @@ -413,14 +389,13 @@ public void testSimpleDateHistoWithTimeZone() throws Exception { "the_histo.date_histogram.interval", "1d", "the_histo.date_histogram._count", 5, "the_histo.date_histogram.time_zone", timeZone.toString(), - "the_histo.min.value", (double) (now - TimeValue.timeValueHours(6).getMillis()), - "the_histo.max.value", (double) now, "_rollup.id", job.getId() ) )); }); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/34762") public void testRandomizedDateHisto() throws Exception { String rollupIndex = randomAlphaOfLengthBetween(5, 10); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java index 3372667191106..2cf548eb4e1a9 
100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java @@ -30,6 +30,7 @@ import org.elasticsearch.xpack.core.security.user.ElasticUser; import org.elasticsearch.xpack.core.security.user.KibanaUser; import org.elasticsearch.xpack.core.security.user.LogstashSystemUser; +import org.elasticsearch.xpack.core.security.user.RemoteMonitoringUser; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore.ReservedUserInfo; import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm; @@ -152,6 +153,8 @@ private User getUser(String username, ReservedUserInfo userInfo) { return new BeatsSystemUser(userInfo.enabled); case APMSystemUser.NAME: return new APMSystemUser(userInfo.enabled); + case RemoteMonitoringUser.NAME: + return new RemoteMonitoringUser(userInfo.enabled); default: if (anonymousEnabled && anonymousUser.principal().equals(username)) { return anonymousUser; @@ -183,6 +186,9 @@ public void users(ActionListener> listener) { userInfo = reservedUserInfos.get(APMSystemUser.NAME); users.add(new APMSystemUser(userInfo == null || userInfo.enabled)); + userInfo = reservedUserInfos.get(RemoteMonitoringUser.NAME); + users.add(new RemoteMonitoringUser(userInfo == null || userInfo.enabled)); + if (anonymousEnabled) { users.add(anonymousUser); } @@ -236,6 +242,8 @@ private Version getDefinedVersion(String username) { return BeatsSystemUser.DEFINED_SINCE; case APMSystemUser.NAME: return APMSystemUser.DEFINED_SINCE; + case RemoteMonitoringUser.NAME: + return RemoteMonitoringUser.DEFINED_SINCE; default: return Version.V_6_0_0; } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java index fad10c821c85d..691142a9405c2 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java @@ -32,6 +32,7 @@ import org.elasticsearch.xpack.core.security.user.ElasticUser; import org.elasticsearch.xpack.core.security.user.KibanaUser; import org.elasticsearch.xpack.core.security.user.LogstashSystemUser; +import org.elasticsearch.xpack.core.security.user.RemoteMonitoringUser; import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm; import org.elasticsearch.xpack.security.authc.esnative.tool.HttpResponse.HttpResponseBuilder; @@ -65,7 +66,7 @@ public class SetupPasswordTool extends LoggingAwareMultiCommand { private static final char[] CHARS = ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789").toCharArray(); public static final List USERS = asList(ElasticUser.NAME, APMSystemUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, - BeatsSystemUser.NAME); + BeatsSystemUser.NAME, RemoteMonitoringUser.NAME); private final BiFunction clientFunction; private final CheckedFunction keyStoreFunction; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SSLEngineUtils.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SSLEngineUtils.java index 5bbcbaa050917..32b153b193574 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SSLEngineUtils.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SSLEngineUtils.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.security.transport; import io.netty.channel.Channel; +import io.netty.channel.ChannelException; import io.netty.handler.ssl.SslHandler; import 
org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -59,7 +60,13 @@ public static SSLEngine getSSLEngine(TcpChannel tcpChannel) { if (tcpChannel instanceof Netty4TcpChannel) { Channel nettyChannel = ((Netty4TcpChannel) tcpChannel).getNettyChannel(); SslHandler handler = nettyChannel.pipeline().get(SslHandler.class); - assert handler != null : "Must have SslHandler"; + if (handler == null) { + if (nettyChannel.isOpen()) { + assert false : "Must have SslHandler"; + } else { + throw new ChannelException("Channel is closed."); + } + } return handler.engine(); } else if (tcpChannel instanceof NioTcpChannel) { SocketChannelContext context = ((NioTcpChannel) tcpChannel).getContext(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java index 63a38b12a9e17..bc235e0918e0d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java @@ -17,12 +17,14 @@ import org.elasticsearch.xpack.core.security.user.ElasticUser; import org.elasticsearch.xpack.core.security.user.KibanaUser; import org.elasticsearch.xpack.core.security.user.LogstashSystemUser; +import org.elasticsearch.xpack.core.security.user.RemoteMonitoringUser; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.junit.After; import org.junit.Before; import java.io.IOException; import java.util.Arrays; +import java.util.List; import java.util.Set; /** @@ -89,7 +91,9 @@ public void setupReservedPasswords(RestClient restClient) throws IOException { RequestOptions.Builder optionsBuilder = RequestOptions.DEFAULT.toBuilder(); optionsBuilder.addHeader("Authorization", UsernamePasswordToken.basicAuthHeaderValue(ElasticUser.NAME, reservedPassword)); RequestOptions options = 
optionsBuilder.build(); - for (String username : Arrays.asList(KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME, APMSystemUser.NAME)) { + final List usernames = Arrays.asList(KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME, APMSystemUser.NAME, + RemoteMonitoringUser.NAME); + for (String username : usernames) { Request request = new Request("PUT", "/_xpack/security/user/" + username + "/_password"); request.setJsonEntity("{\"password\": \"" + new String(reservedPassword.getChars()) + "\"}"); request.setOptions(options); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java index 014599dedae83..41bd8bfc6e620 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.xpack.core.security.user.ElasticUser; import org.elasticsearch.xpack.core.security.user.KibanaUser; import org.elasticsearch.xpack.core.security.user.LogstashSystemUser; +import org.elasticsearch.xpack.core.security.user.RemoteMonitoringUser; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.junit.Before; @@ -83,7 +84,7 @@ public void testPasswordUpsertWhenSetEnabledOnReservedUser() throws Exception { final NativeUsersStore nativeUsersStore = startNativeUsersStore(); final String user = randomFrom(ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, - BeatsSystemUser.NAME, APMSystemUser.NAME); + BeatsSystemUser.NAME, APMSystemUser.NAME, RemoteMonitoringUser.NAME); final PlainActionFuture future = new PlainActionFuture<>(); nativeUsersStore.setEnabled(user, true, 
WriteRequest.RefreshPolicy.IMMEDIATE, future); @@ -102,7 +103,7 @@ public void testBlankPasswordInIndexImpliesDefaultPassword() throws Exception { final NativeUsersStore nativeUsersStore = startNativeUsersStore(); final String user = randomFrom(ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, - BeatsSystemUser.NAME, APMSystemUser.NAME); + BeatsSystemUser.NAME, APMSystemUser.NAME, RemoteMonitoringUser.NAME); final Map values = new HashMap<>(); values.put(ENABLED_FIELD, Boolean.TRUE); values.put(PASSWORD_FIELD, BLANK_PASSWORD); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmIntegTests.java index 8f7116dd9718c..59612d6227a71 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmIntegTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.xpack.core.security.user.ElasticUser; import org.elasticsearch.xpack.core.security.user.KibanaUser; import org.elasticsearch.xpack.core.security.user.LogstashSystemUser; +import org.elasticsearch.xpack.core.security.user.RemoteMonitoringUser; import org.junit.BeforeClass; import java.util.Arrays; @@ -52,7 +53,7 @@ public Settings nodeSettings(int nodeOrdinal) { public void testAuthenticate() { final List usernames = Arrays.asList(ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, - BeatsSystemUser.NAME, APMSystemUser.NAME); + BeatsSystemUser.NAME, APMSystemUser.NAME, RemoteMonitoringUser.NAME); for (String username : usernames) { ClusterHealthResponse response = client() .filterWithHeader(singletonMap("Authorization", basicAuthHeaderValue(username, getReservedPassword()))) @@ -72,7 +73,7 @@ public void testAuthenticate() { public void 
testAuthenticateAfterEnablingUser() { final SecurityClient c = securityClient(); final List usernames = Arrays.asList(ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, - BeatsSystemUser.NAME, APMSystemUser.NAME); + BeatsSystemUser.NAME, APMSystemUser.NAME, RemoteMonitoringUser.NAME); for (String username : usernames) { c.prepareSetEnabled(username, true).get(); ClusterHealthResponse response = client() @@ -88,7 +89,7 @@ public void testAuthenticateAfterEnablingUser() { public void testChangingPassword() { String username = randomFrom(ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, - BeatsSystemUser.NAME, APMSystemUser.NAME); + BeatsSystemUser.NAME, APMSystemUser.NAME, RemoteMonitoringUser.NAME); final char[] newPassword = "supersecretvalue".toCharArray(); if (randomBoolean()) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java index 36d1690b8b202..53963f996daf6 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.xpack.core.security.user.ElasticUser; import org.elasticsearch.xpack.core.security.user.KibanaUser; import org.elasticsearch.xpack.core.security.user.LogstashSystemUser; +import org.elasticsearch.xpack.core.security.user.RemoteMonitoringUser; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.UsernamesField; import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore.ReservedUserInfo; @@ -264,7 +265,7 @@ public void testGetUsers() { reservedRealm.users(userFuture); assertThat(userFuture.actionGet(), containsInAnyOrder(new ElasticUser(true), new KibanaUser(true), new 
LogstashSystemUser(true), - new BeatsSystemUser(true), new APMSystemUser((true)))); + new BeatsSystemUser(true), new APMSystemUser(true), new RemoteMonitoringUser(true))); } public void testGetUsersDisabled() { @@ -396,7 +397,8 @@ public void testNonElasticUsersCannotUseBootstrapPasswordWhenSecurityIndexExists new AnonymousUser(Settings.EMPTY), securityIndex, threadPool); PlainActionFuture listener = new PlainActionFuture<>(); - final String principal = randomFrom(KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME, APMSystemUser.NAME); + final String principal = randomFrom(KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME, APMSystemUser.NAME, + RemoteMonitoringUser.NAME); doAnswer((i) -> { ActionListener callback = (ActionListener) i.getArguments()[1]; callback.onResponse(null); @@ -418,7 +420,8 @@ public void testNonElasticUsersCannotUseBootstrapPasswordWhenSecurityIndexDoesNo new AnonymousUser(Settings.EMPTY), securityIndex, threadPool); PlainActionFuture listener = new PlainActionFuture<>(); - final String principal = randomFrom(KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME, APMSystemUser.NAME); + final String principal = randomFrom(KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME, APMSystemUser.NAME, + RemoteMonitoringUser.NAME); reservedRealm.doAuthenticate(new UsernamePasswordToken(principal, mockSecureSettings.getString("bootstrap.password")), listener); final AuthenticationResult result = listener.get(); assertThat(result.getStatus(), is(AuthenticationResult.Status.TERMINATE)); @@ -426,7 +429,7 @@ public void testNonElasticUsersCannotUseBootstrapPasswordWhenSecurityIndexDoesNo private User randomReservedUser(boolean enabled) { return randomFrom(new ElasticUser(enabled), new KibanaUser(enabled), new LogstashSystemUser(enabled), - new BeatsSystemUser(enabled), new APMSystemUser(enabled)); + new BeatsSystemUser(enabled), new APMSystemUser(enabled), new RemoteMonitoringUser(enabled)); } /* @@ -459,6 
+462,10 @@ private void verifyVersionPredicate(String principal, Predicate version assertThat(versionPredicate.test(Version.V_6_4_0), is(false)); assertThat(versionPredicate.test(Version.V_6_5_0), is(true)); break; + case RemoteMonitoringUser.NAME: + assertThat(versionPredicate.test(Version.V_6_4_0), is(false)); + assertThat(versionPredicate.test(Version.V_6_5_0), is(true)); + break; default: assertThat(versionPredicate.test(Version.V_6_3_0), is(true)); break; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndexAliasesTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndexAliasesTests.java index dca113b6e4229..711ca517d98c3 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndexAliasesTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndexAliasesTests.java @@ -509,7 +509,7 @@ public void testGetAliasesAliasesOnlyPermissionStrict() { //security plugin lets it through, but es core intercepts it due to strict indices options and throws index not found IndexNotFoundException indexNotFoundException = expectThrows(IndexNotFoundException.class, client.admin().indices() .prepareGetAliases("alias_1").addIndices("test_1").setIndicesOptions(IndicesOptions.strictExpandOpen())::get); - assertEquals("no such index", indexNotFoundException.getMessage()); + assertEquals("no such index [test_1]", indexNotFoundException.getMessage()); //fails: no manage_aliases privilege on non_authorized alias assertThrowsAuthorizationException(client.admin().indices().prepareGetAliases("non_authorized").addIndices("test_1") diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java index 4dc0909552c26..3e7543ffd9904 100644 --- 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java @@ -68,6 +68,9 @@ import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.elasticsearch.xpack.security.test.SecurityTestUtils; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.joda.time.format.DateTimeFormat; import org.junit.Before; import java.util.Arrays; @@ -469,7 +472,7 @@ public void testResolveNonMatchingIndicesDisallowNoIndices() { request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), false, true, randomBoolean())); IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME))); - assertEquals("no such index", e.getMessage()); + assertEquals("no such index [missing*]", e.getMessage()); } public void testResolveExplicitIndicesStrict() { @@ -506,7 +509,7 @@ public void testResolveNoAuthorizedIndicesDisallowNoIndices() { request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), false, true, randomBoolean())); IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> resolveIndices(request, buildAuthorizedIndices(userNoIndices, SearchAction.NAME))); - assertEquals("no such index", e.getMessage()); + assertEquals("no such index [[]]", e.getMessage()); } public void testResolveMissingIndexStrict() { @@ -848,7 +851,7 @@ public void testGetAliasesRequestMissingIndexIgnoreUnavailableDisallowNoIndices( request.aliases("alias2"); IndexNotFoundException exception = expectThrows(IndexNotFoundException.class, () -> resolveIndices(request, buildAuthorizedIndices(user, GetAliasesAction.NAME)).getLocal()); - assertEquals("no such index", exception.getMessage()); + assertEquals("no such index 
[[missing]]", exception.getMessage()); } public void testGetAliasesRequestMissingIndexIgnoreUnavailableAllowNoIndices() { @@ -928,7 +931,7 @@ public void testWildcardsGetAliasesRequestNoMatchingIndicesDisallowNoIndices() { request.indices("non_matching_*"); IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> resolveIndices(request, buildAuthorizedIndices(user, GetAliasesAction.NAME)).getLocal()); - assertEquals("no such index", e.getMessage()); + assertEquals("no such index [non_matching_*]", e.getMessage()); } public void testWildcardsGetAliasesRequestNoMatchingIndicesAllowNoIndices() { @@ -995,7 +998,7 @@ public void testAllGetAliasesRequestNoAuthorizedIndicesDisallowNoIndices() { request.indices("_all"); IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> resolveIndices(request, buildAuthorizedIndices(userNoIndices, GetAliasesAction.NAME))); - assertEquals("no such index", e.getMessage()); + assertEquals("no such index [[_all]]", e.getMessage()); } public void testWildcardsGetAliasesRequestNoAuthorizedIndicesAllowNoIndices() { @@ -1015,7 +1018,7 @@ public void testWildcardsGetAliasesRequestNoAuthorizedIndicesDisallowNoIndices() //current user is not authorized for any index, foo* resolves to no indices, the request fails IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> resolveIndices(request, buildAuthorizedIndices(userNoIndices, GetAliasesAction.NAME))); - assertEquals("no such index", e.getMessage()); + assertEquals("no such index [foo*]", e.getMessage()); } public void testResolveAllAliasesGetAliasesRequest() { @@ -1126,9 +1129,11 @@ public void testResolveAliasesAllGetAliasesRequestNoAuthorizedIndices() { public void testRemotableRequestsAllowRemoteIndices() { IndicesOptions options = IndicesOptions.fromOptions(true, false, false, false); Tuple tuple = randomFrom( - new Tuple<>(new SearchRequest("remote:foo").indicesOptions(options), SearchAction.NAME), - new Tuple<>(new 
FieldCapabilitiesRequest().indices("remote:foo").indicesOptions(options), FieldCapabilitiesAction.NAME), - new Tuple<>(new GraphExploreRequest().indices("remote:foo").indicesOptions(options), GraphExploreAction.NAME) + new Tuple(new SearchRequest("remote:foo").indicesOptions(options), SearchAction.NAME), + new Tuple(new FieldCapabilitiesRequest().indices("remote:foo").indicesOptions(options), + FieldCapabilitiesAction.NAME), + new Tuple(new GraphExploreRequest().indices("remote:foo").indicesOptions(options), + GraphExploreAction.NAME) ); final TransportRequest request = tuple.v1(); ResolvedIndices resolved = resolveIndices(request, buildAuthorizedIndices(user, tuple.v2())); @@ -1143,21 +1148,21 @@ public void testRemotableRequestsAllowRemoteIndices() { public void testNonRemotableRequestDoesNotAllowRemoteIndices() { IndicesOptions options = IndicesOptions.fromOptions(true, false, false, false); Tuple tuple = randomFrom( - new Tuple<>(new CloseIndexRequest("remote:foo").indicesOptions(options), CloseIndexAction.NAME), - new Tuple<>(new DeleteIndexRequest("remote:foo").indicesOptions(options), DeleteIndexAction.NAME), - new Tuple<>(new PutMappingRequest("remote:foo").indicesOptions(options), PutMappingAction.NAME) + new Tuple(new CloseIndexRequest("remote:foo").indicesOptions(options), CloseIndexAction.NAME), + new Tuple(new DeleteIndexRequest("remote:foo").indicesOptions(options), DeleteIndexAction.NAME), + new Tuple(new PutMappingRequest("remote:foo").indicesOptions(options), PutMappingAction.NAME) ); IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> resolveIndices(tuple.v1(), buildAuthorizedIndices(user, tuple.v2())).getLocal()); - assertEquals("no such index", e.getMessage()); + assertEquals("no such index [[remote:foo]]", e.getMessage()); } public void testNonRemotableRequestDoesNotAllowRemoteWildcardIndices() { IndicesOptions options = IndicesOptions.fromOptions(randomBoolean(), true, true, true); Tuple tuple = randomFrom( - new 
Tuple<>(new CloseIndexRequest("*:*").indicesOptions(options), CloseIndexAction.NAME), - new Tuple<>(new DeleteIndexRequest("*:*").indicesOptions(options), DeleteIndexAction.NAME), - new Tuple<>(new PutMappingRequest("*:*").indicesOptions(options), PutMappingAction.NAME) + new Tuple(new CloseIndexRequest("*:*").indicesOptions(options), CloseIndexAction.NAME), + new Tuple(new DeleteIndexRequest("*:*").indicesOptions(options), DeleteIndexAction.NAME), + new Tuple(new PutMappingRequest("*:*").indicesOptions(options), PutMappingAction.NAME) ); final ResolvedIndices resolved = resolveIndices(tuple.v1(), buildAuthorizedIndices(user, tuple.v2())); assertNoIndices((IndicesRequest.Replaceable) tuple.v1(), resolved); @@ -1267,15 +1272,17 @@ public void testUnauthorizedDateMathExpressionIgnoreUnavailableDisallowNoIndices request.indicesOptions(IndicesOptions.fromOptions(true, false, randomBoolean(), randomBoolean())); IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME))); - assertEquals("no such index" , e.getMessage()); + assertEquals("no such index [[]]" , e.getMessage()); } public void testUnauthorizedDateMathExpressionStrict() { + String expectedIndex = "datetime-" + DateTimeFormat.forPattern("YYYY.MM.dd").print( + new DateTime(DateTimeZone.UTC).monthOfYear().roundFloorCopy()); SearchRequest request = new SearchRequest(""); request.indicesOptions(IndicesOptions.fromOptions(false, randomBoolean(), randomBoolean(), randomBoolean())); IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME))); - assertEquals("no such index" , e.getMessage()); + assertEquals("no such index [" + expectedIndex + "]" , e.getMessage()); } public void testResolveDateMathExpression() { @@ -1307,15 +1314,17 @@ public void testMissingDateMathExpressionIgnoreUnavailableDisallowNoIndices() { 
request.indicesOptions(IndicesOptions.fromOptions(true, false, randomBoolean(), randomBoolean())); IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME))); - assertEquals("no such index" , e.getMessage()); + assertEquals("no such index [[]]" , e.getMessage()); } public void testMissingDateMathExpressionStrict() { + String expectedIndex = "foobar-" + DateTimeFormat.forPattern("YYYY.MM.dd").print( + new DateTime(DateTimeZone.UTC).monthOfYear().roundFloorCopy()); SearchRequest request = new SearchRequest(""); request.indicesOptions(IndicesOptions.fromOptions(false, randomBoolean(), randomBoolean(), randomBoolean())); IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME))); - assertEquals("no such index" , e.getMessage()); + assertEquals("no such index [" + expectedIndex + "]" , e.getMessage()); } public void testAliasDateMathExpressionNotSupported() { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/ReadActionsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/ReadActionsTests.java index 76568d3d48b5a..e9ed559ab8e47 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/ReadActionsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/ReadActionsTests.java @@ -73,7 +73,7 @@ public void testSearchNonAuthorizedWildcardDisallowNoIndices() { createIndicesWithRandomAliases("test1", "test2", "index1", "index2"); IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> client().prepareSearch("index*") .setIndicesOptions(IndicesOptions.fromOptions(randomBoolean(), false, true, randomBoolean())).get()); - assertEquals("no such index", e.getMessage()); + assertEquals("no such index [index*]", e.getMessage()); } public void 
testEmptyClusterSearchForAll() { @@ -83,7 +83,7 @@ public void testEmptyClusterSearchForAll() { public void testEmptyClusterSearchForAllDisallowNoIndices() { IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> client().prepareSearch() .setIndicesOptions(IndicesOptions.fromOptions(randomBoolean(), false, true, randomBoolean())).get()); - assertEquals("no such index", e.getMessage()); + assertEquals("no such index [[]]", e.getMessage()); } public void testEmptyClusterSearchForWildcard() { @@ -94,7 +94,7 @@ public void testEmptyClusterSearchForWildcard() { public void testEmptyClusterSearchForWildcardDisallowNoIndices() { IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> client().prepareSearch("*") .setIndicesOptions(IndicesOptions.fromOptions(randomBoolean(), false, true, randomBoolean())).get()); - assertEquals("no such index", e.getMessage()); + assertEquals("no such index [*]", e.getMessage()); } public void testEmptyAuthorizedIndicesSearchForAll() { @@ -106,7 +106,7 @@ public void testEmptyAuthorizedIndicesSearchForAllDisallowNoIndices() { createIndicesWithRandomAliases("index1", "index2"); IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> client().prepareSearch() .setIndicesOptions(IndicesOptions.fromOptions(randomBoolean(), false, true, randomBoolean())).get()); - assertEquals("no such index", e.getMessage()); + assertEquals("no such index [[]]", e.getMessage()); } public void testEmptyAuthorizedIndicesSearchForWildcard() { @@ -118,7 +118,7 @@ public void testEmptyAuthorizedIndicesSearchForWildcardDisallowNoIndices() { createIndicesWithRandomAliases("index1", "index2"); IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> client().prepareSearch("*") .setIndicesOptions(IndicesOptions.fromOptions(randomBoolean(), false, true, randomBoolean())).get()); - assertEquals("no such index", e.getMessage()); + assertEquals("no such index [*]", e.getMessage()); } public void 
testExplicitNonAuthorizedIndex() { @@ -277,7 +277,7 @@ public void testMultiSearchMissingAuthorizedIndex() { assertReturnedIndices(multiSearchResponse.getResponses()[0].getResponse(), "test1", "test2", "test3"); assertTrue(multiSearchResponse.getResponses()[1].isFailure()); assertThat(multiSearchResponse.getResponses()[1].getFailure().toString(), - equalTo("[test4] IndexNotFoundException[no such index]")); + equalTo("[test4] IndexNotFoundException[no such index [test4]]")); } { //we set ignore_unavailable and allow_no_indices to true, no errors returned, second item doesn't have hits. diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/AbstractSimpleSecurityTransportTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/AbstractSimpleSecurityTransportTestCase.java index 2e1a423d5fdf8..077edf22c91ca 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/AbstractSimpleSecurityTransportTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/AbstractSimpleSecurityTransportTestCase.java @@ -21,7 +21,6 @@ import org.elasticsearch.transport.AbstractSimpleTransportTestCase; import org.elasticsearch.transport.BindTransportException; import org.elasticsearch.transport.ConnectTransportException; -import org.elasticsearch.transport.ConnectionManager; import org.elasticsearch.transport.ConnectionProfile; import org.elasticsearch.transport.TcpTransport; import org.elasticsearch.transport.TransportRequestOptions; @@ -111,7 +110,7 @@ public void testTcpHandshake() throws IOException, InterruptedException { assumeTrue("only tcp transport has a handshake method", serviceA.getOriginalTransport() instanceof TcpTransport); TcpTransport originalTransport = (TcpTransport) serviceA.getOriginalTransport(); - ConnectionProfile connectionProfile = ConnectionManager.buildDefaultConnectionProfile(Settings.EMPTY); + ConnectionProfile 
connectionProfile = ConnectionProfile.buildDefaultConnectionProfile(Settings.EMPTY); try (TransportService service = buildService("TS_TPC", Version.CURRENT, null); TcpTransport.NodeChannels connection = originalTransport.openConnection( new DiscoveryNode("TS_TPC", "TS_TPC", service.boundAddress().publishAddress(), emptyMap(), emptySet(), version0), diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SimpleSecurityNetty4ServerTransportTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SimpleSecurityNetty4ServerTransportTests.java index 88895034df9de..291b39f4b05ba 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SimpleSecurityNetty4ServerTransportTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SimpleSecurityNetty4ServerTransportTests.java @@ -93,7 +93,7 @@ protected Version getCurrentVersion() { }; MockTransportService mockTransportService = - MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, clusterSettings, + MockTransportService.createNewService(settings, transport, version, threadPool, clusterSettings, Collections.emptySet()); mockTransportService.start(); return mockTransportService; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SimpleSecurityNioTransportTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SimpleSecurityNioTransportTests.java index 5208d58d74390..7fd4d8b5e0319 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SimpleSecurityNioTransportTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SimpleSecurityNioTransportTests.java @@ -55,7 +55,7 @@ protected Version getCurrentVersion() { }; MockTransportService 
mockTransportService = - MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, clusterSettings, + MockTransportService.createNewService(settings, transport, version, threadPool, clusterSettings, Collections.emptySet()); mockTransportService.start(); return mockTransportService; diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java index 2decfe5d3c5c5..940e2c757df44 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java @@ -30,7 +30,6 @@ import java.util.Map; import java.util.Map.Entry; import java.util.function.Function; -import java.util.stream.Collectors; import static java.lang.String.format; import static java.util.Calendar.DAY_OF_MONTH; @@ -41,6 +40,7 @@ import static java.util.Calendar.MONTH; import static java.util.Calendar.SECOND; import static java.util.Calendar.YEAR; +import static java.util.stream.Collectors.toMap; /** * Conversion utilities for conversion of JDBC types to Java type and back @@ -52,9 +52,7 @@ */ final class TypeConverter { - private TypeConverter() { - - } + private TypeConverter() {} private static final long DAY_IN_MILLIS = 60 * 60 * 24 * 1000; private static final Map, SQLType> javaToJDBC; @@ -64,9 +62,10 @@ private TypeConverter() { Map, SQLType> aMap = Arrays.stream(DataType.values()) .filter(dataType -> dataType.javaClass() != null && dataType != DataType.HALF_FLOAT + && dataType != DataType.IP && dataType != DataType.SCALED_FLOAT && dataType != DataType.TEXT) - .collect(Collectors.toMap(dataType -> dataType.javaClass(), dataType -> dataType.jdbcType)); + .collect(toMap(dataType -> dataType.javaClass(), dataType -> dataType.jdbcType)); // apart from the mappings in {@code DataType} three more Java classes 
can be mapped to a {@code JDBCType.TIMESTAMP} // according to B-4 table from the jdbc4.2 spec aMap.put(Calendar.class, JDBCType.TIMESTAMP); diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java index 970be02e38572..da4037ac95c64 100644 --- a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java @@ -167,7 +167,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws * Serializes the provided value in SQL-compatible way based on the client mode */ public static XContentBuilder value(XContentBuilder builder, Mode mode, Object value) throws IOException { - if (mode == Mode.JDBC && value instanceof ReadableDateTime) { + if (Mode.isDriver(mode) && value instanceof ReadableDateTime) { // JDBC cannot parse dates in string format builder.value(((ReadableDateTime) value).getMillis()); } else { diff --git a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/JreHttpUrlConnection.java b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/JreHttpUrlConnection.java index 0dca4a88f0592..3f894ae59af8e 100644 --- a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/JreHttpUrlConnection.java +++ b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/JreHttpUrlConnection.java @@ -47,6 +47,7 @@ public class JreHttpUrlConnection implements Closeable { * error. 
*/ public static final String SQL_STATE_BAD_SERVER = "bad_server"; + private static final String SQL_NOT_AVAILABLE_ERROR_MESSAGE = "request [/_xpack/sql] contains unrecognized parameter: [mode]"; public static R http(String path, String query, ConnectionConfiguration cfg, Function handler) { final URI uriPath = cfg.baseUri().resolve(path); // update path if needed @@ -176,6 +177,19 @@ private ResponseOrException parserError() throws IOException { } SqlExceptionType type = SqlExceptionType.fromRemoteFailureType(failure.type()); if (type == null) { + // check if x-pack or sql are not available (x-pack not installed or sql not enabled) + // by checking the error message the server is sending back + if (con.getResponseCode() >= HttpURLConnection.HTTP_BAD_REQUEST + && failure.reason().contains(SQL_NOT_AVAILABLE_ERROR_MESSAGE)) { + return new ResponseOrException<>(new SQLException("X-Pack/SQL do not seem to be available" + + " on the Elasticsearch node using the access path '" + + con.getURL().getHost() + + (con.getURL().getPort() > 0 ? ":" + con.getURL().getPort() : "") + + "'." + + " Please verify X-Pack is installed and SQL enabled. Alternatively, check if any proxy is interfering" + + " the communication to Elasticsearch", + SQL_STATE_BAD_SERVER)); + } return new ResponseOrException<>(new SQLException("Server sent bad type [" + failure.type() + "]. Original type was [" + failure.reason() + "]. 
[" + failure.remoteTrace() + "]", SQL_STATE_BAD_SERVER)); diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Mode.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Mode.java index 02f175ca80d79..598c52a91797b 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Mode.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Mode.java @@ -13,7 +13,8 @@ */ public enum Mode { PLAIN, - JDBC; + JDBC, + ODBC; public static Mode fromString(String mode) { if (mode == null) { @@ -27,4 +28,8 @@ public static Mode fromString(String mode) { public String toString() { return this.name().toLowerCase(Locale.ROOT); } + + public static boolean isDriver(Mode mode) { + return mode == JDBC || mode == ODBC; + } } diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java index 1c9cf6ac9257f..3ad3b9090a543 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java @@ -12,7 +12,8 @@ import java.util.HashMap; import java.util.Locale; import java.util.Map; -import java.util.stream.Collectors; + +import static java.util.stream.Collectors.toMap; /** * Elasticsearch data types that supported by SQL interface @@ -42,7 +43,13 @@ public enum DataType { // since ODBC and JDBC interpret precision for Date as display size, // the precision is 23 (number of chars in ISO8601 with millis) + Z (the UTC timezone) // see https://github.com/elastic/elasticsearch/issues/30386#issuecomment-386807288 - DATE( JDBCType.TIMESTAMP, Timestamp.class, Long.BYTES, 24, 24); + DATE( JDBCType.TIMESTAMP, Timestamp.class, Long.BYTES, 24, 24), + // + // specialized types + // + // IP can be v4 or v6. 
The latter has 2^128 addresses or 340,282,366,920,938,463,463,374,607,431,768,211,456 + // aka 39 chars + IP( JDBCType.VARCHAR, String.class, 39, 39, 0,false, false, true); // @formatter:on public static final String ODBC_DATATYPE_PREFIX = "SQL_"; @@ -52,8 +59,9 @@ public enum DataType { static { jdbcToEs = Arrays.stream(DataType.values()) - .filter(dataType -> dataType != TEXT && dataType != NESTED && dataType != SCALED_FLOAT) // Remove duplicates - .collect(Collectors.toMap(dataType -> dataType.jdbcType, dataType -> dataType)); + .filter(type -> type != TEXT && type != NESTED + && type != SCALED_FLOAT && type != IP) // Remove duplicates + .collect(toMap(dataType -> dataType.jdbcType, dataType -> dataType)); odbcToEs = new HashMap<>(36); @@ -233,12 +241,9 @@ public static DataType fromEsType(String esType) { public boolean isCompatibleWith(DataType other) { if (this == other) { return true; - } else if (isString() && other.isString()) { - return true; - } else if (isNumeric() && other.isNumeric()) { - return true; - } else { - return false; - } + } else return + (this == NULL || other == NULL) || + (isString() && other.isString()) || + (isNumeric() && other.isNumeric()); } -} +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java index e5ab3ce082b71..c8834240c6ceb 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.sql.analysis.analyzer; import org.elasticsearch.xpack.sql.capabilities.Unresolvable; +import org.elasticsearch.xpack.sql.expression.Alias; import org.elasticsearch.xpack.sql.expression.Attribute; import org.elasticsearch.xpack.sql.expression.AttributeSet; import 
org.elasticsearch.xpack.sql.expression.Exists; @@ -24,6 +25,7 @@ import org.elasticsearch.xpack.sql.plan.logical.Filter; import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.sql.plan.logical.OrderBy; +import org.elasticsearch.xpack.sql.plan.logical.Project; import org.elasticsearch.xpack.sql.tree.Node; import org.elasticsearch.xpack.sql.type.DataType; import org.elasticsearch.xpack.sql.util.StringUtils; @@ -237,8 +239,17 @@ private static boolean checkGroupByOrder(LogicalPlan p, Set localFailur Set groupingFailures, Map functions) { if (p instanceof OrderBy) { OrderBy o = (OrderBy) p; - if (o.child() instanceof Aggregate) { - Aggregate a = (Aggregate) o.child(); + LogicalPlan child = o.child(); + + if (child instanceof Project) { + child = ((Project) child).child(); + } + if (child instanceof Filter) { + child = ((Filter) child).child(); + } + + if (child instanceof Aggregate) { + Aggregate a = (Aggregate) child; Map> missing = new LinkedHashMap<>(); o.order().forEach(oe -> { @@ -249,9 +260,25 @@ private static boolean checkGroupByOrder(LogicalPlan p, Set localFailur return; } - // make sure to compare attributes directly - if (Expressions.anyMatch(a.groupings(), - g -> e.semanticEquals(e instanceof Attribute ? 
Expressions.attribute(g) : g))) { + // take aliases declared inside the aggregates which point to the grouping (but are not included in there) + // to correlate them to the order + List groupingAndMatchingAggregatesAliases = new ArrayList<>(a.groupings()); + + a.aggregates().forEach(as -> { + if (as instanceof Alias) { + Alias al = (Alias) as; + if (Expressions.anyMatch(a.groupings(), g -> Expressions.equalsAsAttribute(al.child(), g))) { + groupingAndMatchingAggregatesAliases.add(al); + } + } + }); + + // Make sure you can apply functions on top of the grouped by expressions in the ORDER BY: + // e.g.: if "GROUP BY f2(f1(field))" you can "ORDER BY f4(f3(f2(f1(field))))" + // + // Also, make sure to compare attributes directly + if (e.anyMatch(expression -> Expressions.anyMatch(groupingAndMatchingAggregatesAliases, + g -> expression.semanticEquals(expression instanceof Attribute ? Expressions.attribute(g) : g)))) { return; } @@ -274,7 +301,6 @@ private static boolean checkGroupByOrder(LogicalPlan p, Set localFailur return true; } - private static boolean checkGroupByHaving(LogicalPlan p, Set localFailures, Set groupingFailures, Map functions) { if (p instanceof Filter) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java index e9a37240be091..5b25ac3df9285 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.sql.expression; +import org.elasticsearch.common.Strings; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.elasticsearch.xpack.sql.expression.Expression.TypeResolution; import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; @@ -131,16 +132,17 @@ public static TypeResolution typeMustBe(Expression e, 
Predicate pred } public static TypeResolution typeMustBeNumeric(Expression e) { - return e.dataType().isNumeric() ? TypeResolution.TYPE_RESOLVED : new TypeResolution(numericErrorMessage(e)); + return e.dataType().isNumeric() ? TypeResolution.TYPE_RESOLVED : new TypeResolution(incorrectTypeErrorMessage(e, "numeric")); } public static TypeResolution typeMustBeNumericOrDate(Expression e) { return e.dataType().isNumeric() || e.dataType() == DataType.DATE ? TypeResolution.TYPE_RESOLVED : - new TypeResolution(numericErrorMessage(e)); + new TypeResolution(incorrectTypeErrorMessage(e, "numeric", "date")); } - - private static String numericErrorMessage(Expression e) { - return "Argument required to be numeric ('" + Expressions.name(e) + "' of type '" + e.dataType().esType + "')"; + + private static String incorrectTypeErrorMessage(Expression e, String...acceptedTypes) { + return "Argument required to be " + Strings.arrayToDelimitedString(acceptedTypes, " or ") + + " ('" + Expressions.name(e) + "' type is '" + e.dataType().esType + "')"; } -} +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Foldables.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Foldables.java index 6e06a1d1c8581..8c672ed162ee6 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Foldables.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Foldables.java @@ -48,7 +48,7 @@ public static double doubleValueOf(Expression e) { public static List valuesOf(List list, DataType to) { List l = new ArrayList<>(list.size()); for (Expression e : list) { - l.add(valueOf(e, to)); + l.add(valueOf(e, to)); } return l; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/In.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/In.java index a820833d1a013..9b16b77511ca7 100644 --- 
a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/In.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/In.java @@ -42,7 +42,7 @@ public class In extends NamedExpression implements ScriptWeaver { public In(Location location, Expression value, List list) { super(location, null, CollectionUtils.combine(list, value), null); this.value = value; - this.list = list.stream().distinct().collect(Collectors.toList()); + this.list = new ArrayList<>(new LinkedHashSet<>(list)); } @Override @@ -78,20 +78,30 @@ public boolean nullable() { @Override public boolean foldable() { - return Expressions.foldable(children()); + return Expressions.foldable(children()) || + (Expressions.foldable(list) && list().stream().allMatch(e -> e.dataType() == DataType.NULL)); } @Override - public Object fold() { - Object foldedLeftValue = value.fold(); + public Boolean fold() { + if (value.dataType() == DataType.NULL) { + return null; + } + if (list.size() == 1 && list.get(0).dataType() == DataType.NULL) { + return false; + } + Object foldedLeftValue = value.fold(); + Boolean result = false; for (Expression rightValue : list) { Boolean compResult = Comparisons.eq(foldedLeftValue, rightValue.fold()); - if (compResult != null && compResult) { + if (compResult == null) { + result = null; + } else if (compResult) { return true; } } - return false; + return result; } @Override @@ -118,15 +128,18 @@ public ScriptTemplate asScript() { String scriptPrefix = leftScript + "=="; LinkedHashSet values = list.stream().map(Expression::fold).collect(Collectors.toCollection(LinkedHashSet::new)); for (Object valueFromList : values) { - if (valueFromList instanceof Expression) { - ScriptTemplate rightScript = asScript((Expression) valueFromList); - sj.add(scriptPrefix + rightScript.template()); - rightParams.add(rightScript.params()); - } else { - if (valueFromList instanceof String) { - sj.add(scriptPrefix + '"' + valueFromList + '"'); + // if 
checked against null => false + if (valueFromList != null) { + if (valueFromList instanceof Expression) { + ScriptTemplate rightScript = asScript((Expression) valueFromList); + sj.add(scriptPrefix + rightScript.template()); + rightParams.add(rightScript.params()); } else { - sj.add(scriptPrefix + valueFromList.toString()); + if (valueFromList instanceof String) { + sj.add(scriptPrefix + '"' + valueFromList + '"'); + } else { + sj.add(scriptPrefix + valueFromList.toString()); + } } } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/InProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/InProcessor.java index 5ebf8870965b5..0a901b5b5e6fe 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/InProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/InProcessor.java @@ -40,14 +40,17 @@ public final void writeTo(StreamOutput out) throws IOException { @Override public Object process(Object input) { Object leftValue = processsors.get(processsors.size() - 1).process(input); + Boolean result = false; for (int i = 0; i < processsors.size() - 1; i++) { Boolean compResult = Comparisons.eq(leftValue, processsors.get(i).process(input)); - if (compResult != null && compResult) { + if (compResult == null) { + result = null; + } else if (compResult) { return true; } } - return false; + return result; } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypes.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypes.java index ab40b076fac85..96e64f3b39ca7 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypes.java +++ 
b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypes.java @@ -67,8 +67,8 @@ public List output() { @Override public final void execute(SqlSession session, ActionListener listener) { List> rows = Stream.of(DataType.values()) - // sort by SQL int type (that's what the JDBC/ODBC specs want) - .sorted(Comparator.comparing(t -> t.jdbcType.getVendorTypeNumber())) + // sort by SQL int type (that's what the JDBC/ODBC specs want) followed by name + .sorted(Comparator.comparing((DataType t) -> t.jdbcType.getVendorTypeNumber()).thenComparing(DataType::sqlName)) .map(t -> asList(t.esType.toUpperCase(Locale.ROOT), t.jdbcType.getVendorTypeNumber(), //https://docs.microsoft.com/en-us/sql/odbc/reference/appendixes/column-size?view=sql-server-2017 diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/Mapper.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/Mapper.java index 6a0b96f444a04..f27cec678094e 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/Mapper.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/Mapper.java @@ -64,7 +64,7 @@ protected PhysicalPlan map(LogicalPlan p) { } if (p instanceof LocalRelation) { - return new LocalExec(p.location(), (LocalRelation) p); + return new LocalExec(p.location(), ((LocalRelation) p).executable()); } if (p instanceof Project) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java index 1d61cb1be46a9..3605898210fc5 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.sql.planner; import org.elasticsearch.common.collect.Tuple; +import 
org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.elasticsearch.xpack.sql.execution.search.AggRef; import org.elasticsearch.xpack.sql.expression.Alias; import org.elasticsearch.xpack.sql.expression.Attribute; @@ -525,8 +526,12 @@ private static class PropagateEmptyLocal extends FoldingRule { protected PhysicalPlan rule(PhysicalPlan plan) { if (plan.children().size() == 1) { PhysicalPlan p = plan.children().get(0); - if (p instanceof LocalExec && ((LocalExec) p).isEmpty()) { - return new LocalExec(plan.location(), new EmptyExecutable(plan.output())); + if (p instanceof LocalExec) { + if (((LocalExec) p).isEmpty()) { + return new LocalExec(plan.location(), new EmptyExecutable(plan.output())); + } else { + throw new SqlIllegalArgumentException("Encountered a bug; {} is a LocalExec but is not empty", p); + } } } return plan; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlPlugin.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlPlugin.java index 6c026b2607161..b22abaa65d781 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlPlugin.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlPlugin.java @@ -64,6 +64,11 @@ public SqlPlugin(Settings settings) { throw LicenseUtils.newComplianceException("jdbc"); } break; + case ODBC: + if (licenseState.isOdbcAllowed() == false) { + throw LicenseUtils.newComplianceException("odbc"); + } + break; case PLAIN: if (licenseState.isSqlAllowed() == false) { throw LicenseUtils.newComplianceException(XPackField.SQL); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java index e491f76749bdc..689dd365f76e4 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java +++ 
b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java @@ -20,6 +20,7 @@ import org.elasticsearch.xpack.sql.action.SqlQueryResponse; import org.elasticsearch.xpack.sql.execution.PlanExecutor; import org.elasticsearch.xpack.sql.proto.ColumnInfo; +import org.elasticsearch.xpack.sql.proto.Mode; import org.elasticsearch.xpack.sql.session.Configuration; import org.elasticsearch.xpack.sql.session.Cursors; import org.elasticsearch.xpack.sql.session.RowSet; @@ -30,7 +31,6 @@ import java.util.List; import static java.util.Collections.unmodifiableList; -import static org.elasticsearch.xpack.sql.proto.Mode.JDBC; public class TransportSqlQueryAction extends HandledTransportAction { private final PlanExecutor planExecutor; @@ -73,7 +73,7 @@ public static void operation(PlanExecutor planExecutor, SqlQueryRequest request, static SqlQueryResponse createResponse(SqlQueryRequest request, SchemaRowSet rowSet) { List columns = new ArrayList<>(rowSet.columnCount()); for (Schema.Entry entry : rowSet.schema()) { - if (request.mode() == JDBC) { + if (Mode.isDriver(request.mode())) { columns.add(new ColumnInfo("", entry.name(), entry.type().esType, entry.type().jdbcType, entry.type().displaySize)); } else { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/AggFilter.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/AggFilter.java index ea71156b0b05f..14b51a942ad4f 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/AggFilter.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/AggFilter.java @@ -14,7 +14,7 @@ import java.util.Map; import java.util.Objects; -import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.bucketSelector; +import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.bucketSelector; public class AggFilter extends PipelineAgg { diff --git 
a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/TermsQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/TermsQuery.java index 412df4e8ca682..91ea49a8a3ce3 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/TermsQuery.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/TermsQuery.java @@ -9,22 +9,30 @@ import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.Foldables; import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import java.util.Collections; import java.util.LinkedHashSet; import java.util.List; import java.util.Objects; +import java.util.Set; import static org.elasticsearch.index.query.QueryBuilders.termsQuery; public class TermsQuery extends LeafQuery { private final String term; - private final LinkedHashSet values; + private final Set values; public TermsQuery(Location location, String term, List values) { super(location); this.term = term; - this.values = new LinkedHashSet<>(Foldables.valuesOf(values, values.get(0).dataType())); + values.removeIf(e -> e.dataType() == DataType.NULL); + if (values.isEmpty()) { + this.values = Collections.emptySet(); + } else { + this.values = new LinkedHashSet<>(Foldables.valuesOf(values, values.get(0).dataType())); + } } @Override diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java index c193dcfd5461f..e69b694968a25 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java @@ -11,6 +11,7 @@ import 
org.elasticsearch.xpack.sql.analysis.index.IndexResolution; import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; import org.elasticsearch.xpack.sql.parser.SqlParser; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.sql.type.EsField; import org.elasticsearch.xpack.sql.type.TypesTests; @@ -34,6 +35,13 @@ private String verify(IndexResolution getIndexResult, String sql) { return e.getMessage().substring(header.length()); } + private LogicalPlan accepted(String sql) { + Map mapping = TypesTests.loadMapping("mapping-multi-field-with-nested.json"); + EsIndex test = new EsIndex("test", mapping); + Analyzer analyzer = new Analyzer(new FunctionRegistry(), IndexResolution.valid(test), TimeZone.getTimeZone("UTC")); + return analyzer.analyze(parser.createStatement(sql), true); + } + public void testMissingIndex() { assertEquals("1:17: Unknown index [missing]", verify(IndexResolution.notFound("missing"), "SELECT foo FROM missing")); } @@ -110,11 +118,26 @@ public void testGroupByOrderByNonGrouped() { verify("SELECT MAX(int) FROM test GROUP BY text ORDER BY bool")); } + public void testGroupByOrderByNonGrouped_WithHaving() { + assertEquals("1:71: Cannot order by non-grouped column [bool], expected [text]", + verify("SELECT MAX(int) FROM test GROUP BY text HAVING MAX(int) > 10 ORDER BY bool")); + } + + public void testGroupByOrderByAliasedInSelectAllowed() { + LogicalPlan lp = accepted("SELECT text t FROM test GROUP BY text ORDER BY t"); + assertNotNull(lp); + } + public void testGroupByOrderByScalarOverNonGrouped() { assertEquals("1:50: Cannot order by non-grouped column [YEAR(date [UTC])], expected [text]", verify("SELECT MAX(int) FROM test GROUP BY text ORDER BY YEAR(date)")); } + public void testGroupByOrderByScalarOverNonGrouped_WithHaving() { + assertEquals("1:71: Cannot order by non-grouped column [YEAR(date [UTC])], expected [text]", + verify("SELECT MAX(int) FROM test GROUP BY text HAVING MAX(int) > 10 ORDER 
BY YEAR(date)")); + } + public void testGroupByHavingNonGrouped() { assertEquals("1:48: Cannot filter by non-grouped column [int], expected [text]", verify("SELECT AVG(int) FROM test GROUP BY text HAVING int > 10")); @@ -126,7 +149,7 @@ public void testGroupByAggregate() { } public void testNotSupportedAggregateOnDate() { - assertEquals("1:8: Argument required to be numeric ('date' of type 'date')", + assertEquals("1:8: Argument required to be numeric ('date' type is 'date')", verify("SELECT AVG(date) FROM test")); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/predicate/InProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/predicate/InProcessorTests.java index 3e71ac90f8127..12bba003115f4 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/predicate/InProcessorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/predicate/InProcessorTests.java @@ -22,6 +22,7 @@ public class InProcessorTests extends AbstractWireSerializingTestCase names = asList("BYTE", "LONG", "BINARY", "NULL", "INTEGER", "SHORT", "HALF_FLOAT", "SCALED_FLOAT", "FLOAT", "DOUBLE", - "KEYWORD", "TEXT", "BOOLEAN", "DATE", "UNSUPPORTED", "OBJECT", "NESTED"); + "KEYWORD", "TEXT", "IP", "BOOLEAN", "DATE", "UNSUPPORTED", "OBJECT", "NESTED"); cmd.execute(null, ActionListener.wrap(r -> { assertEquals(19, r.columnCount()); - assertEquals(17, r.size()); + assertEquals(DataType.values().length, r.size()); assertFalse(r.schema().types().contains(DataType.NULL)); // test numeric as signed assertFalse(r.column(9, Boolean.class)); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryFolderTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryFolderTests.java new file mode 100644 index 0000000000000..5fac14e2397db --- /dev/null +++ 
b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryFolderTests.java @@ -0,0 +1,108 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.planner; + +import org.elasticsearch.test.AbstractBuilderTestCase; +import org.elasticsearch.xpack.sql.analysis.analyzer.Analyzer; +import org.elasticsearch.xpack.sql.analysis.index.EsIndex; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolution; +import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; +import org.elasticsearch.xpack.sql.optimizer.Optimizer; +import org.elasticsearch.xpack.sql.parser.SqlParser; +import org.elasticsearch.xpack.sql.plan.physical.LocalExec; +import org.elasticsearch.xpack.sql.plan.physical.PhysicalPlan; +import org.elasticsearch.xpack.sql.session.EmptyExecutable; +import org.elasticsearch.xpack.sql.type.EsField; +import org.elasticsearch.xpack.sql.type.TypesTests; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.util.Map; +import java.util.TimeZone; + +import static org.hamcrest.Matchers.startsWith; + +public class QueryFolderTests extends AbstractBuilderTestCase { + + private static SqlParser parser; + private static Analyzer analyzer; + private static Optimizer optimizer; + private static Planner planner; + + @BeforeClass + public static void init() { + parser = new SqlParser(); + + Map mapping = TypesTests.loadMapping("mapping-multi-field-variation.json"); + EsIndex test = new EsIndex("test", mapping); + IndexResolution getIndexResult = IndexResolution.valid(test); + analyzer = new Analyzer(new FunctionRegistry(), getIndexResult, TimeZone.getTimeZone("UTC")); + optimizer = new Optimizer(); + planner = new Planner(); + } + + @AfterClass + public static void destroy() { + parser = null; + 
analyzer = null; + } + + private PhysicalPlan plan(String sql) { + return planner.plan(optimizer.optimize(analyzer.analyze(parser.createStatement(sql), true)), true); + } + + public void testFoldingToLocalExecWithProject() { + PhysicalPlan p = plan("SELECT keyword FROM test WHERE 1 = 2"); + assertEquals(LocalExec.class, p.getClass()); + LocalExec le = (LocalExec) p; + assertEquals(EmptyExecutable.class, le.executable().getClass()); + EmptyExecutable ee = (EmptyExecutable) le.executable(); + assertEquals(1, ee.output().size()); + assertThat(ee.output().get(0).toString(), startsWith("keyword{f}#")); + } + + public void testFoldingToLocalExecWithProject_FoldableIn() { + PhysicalPlan p = plan("SELECT keyword FROM test WHERE int IN (null, null)"); + assertEquals(LocalExec.class, p.getClass()); + LocalExec le = (LocalExec) p; + assertEquals(EmptyExecutable.class, le.executable().getClass()); + EmptyExecutable ee = (EmptyExecutable) le.executable(); + assertEquals(1, ee.output().size()); + assertThat(ee.output().get(0).toString(), startsWith("keyword{f}#")); + } + + public void testFoldingToLocalExecWithProject_WithOrderAndLimit() { + PhysicalPlan p = plan("SELECT keyword FROM test WHERE 1 = 2 ORDER BY int LIMIT 10"); + assertEquals(LocalExec.class, p.getClass()); + LocalExec le = (LocalExec) p; + assertEquals(EmptyExecutable.class, le.executable().getClass()); + EmptyExecutable ee = (EmptyExecutable) le.executable(); + assertEquals(1, ee.output().size()); + assertThat(ee.output().get(0).toString(), startsWith("keyword{f}#")); + } + + public void testFoldingToLocalExecWithProjectWithGroupBy_WithOrderAndLimit() { + PhysicalPlan p = plan("SELECT keyword, max(int) FROM test WHERE 1 = 2 GROUP BY keyword ORDER BY 1 LIMIT 10"); + assertEquals(LocalExec.class, p.getClass()); + LocalExec le = (LocalExec) p; + assertEquals(EmptyExecutable.class, le.executable().getClass()); + EmptyExecutable ee = (EmptyExecutable) le.executable(); + assertEquals(2, ee.output().size()); + 
assertThat(ee.output().get(0).toString(), startsWith("keyword{f}#")); + assertThat(ee.output().get(1).toString(), startsWith("MAX(int){a->")); + } + + public void testFoldingToLocalExecWithProjectWithGroupBy_WithHaving_WithOrderAndLimit() { + PhysicalPlan p = plan("SELECT keyword, max(int) FROM test GROUP BY keyword HAVING 1 = 2 ORDER BY 1 LIMIT 10"); + assertEquals(LocalExec.class, p.getClass()); + LocalExec le = (LocalExec) p; + assertEquals(EmptyExecutable.class, le.executable().getClass()); + EmptyExecutable ee = (EmptyExecutable) le.executable(); + assertEquals(2, ee.output().size()); + assertThat(ee.output().get(0).toString(), startsWith("keyword{f}#")); + assertThat(ee.output().get(1).toString(), startsWith("MAX(int){a->")); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java index 8d5db634ff073..c1e5a0d2dafad 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java @@ -173,6 +173,19 @@ public void testTranslateInExpression_WhereClause() throws IOException { assertEquals("keyword:(bar foo lala)", tq.asBuilder().toQuery(createShardContext()).toString()); } + public void testTranslateInExpression_WhereClauseAndNullHandling() throws IOException { + LogicalPlan p = plan("SELECT * FROM test WHERE keyword IN ('foo', null, 'lala', null, 'foo', concat('la', 'la'))"); + assertTrue(p instanceof Project); + assertTrue(p.children().get(0) instanceof Filter); + Expression condition = ((Filter) p.children().get(0)).condition(); + assertFalse(condition.foldable()); + QueryTranslation translation = QueryTranslator.toQuery(condition, false); + Query query = translation.query; + assertTrue(query instanceof TermsQuery); + TermsQuery tq = (TermsQuery) query; + 
assertEquals("keyword:(foo lala)", tq.asBuilder().toQuery(createShardContext()).toString()); + } + public void testTranslateInExpressionInvalidValues_WhereClause() { LogicalPlan p = plan("SELECT * FROM test WHERE keyword IN ('foo', 'bar', keyword)"); assertTrue(p instanceof Project); @@ -196,4 +209,17 @@ public void testTranslateInExpression_HavingClause_Painless() { assertEquals("InternalSqlScriptUtils.nullSafeFilter(params.a0==10 || params.a0==20)", sq.script().toString()); assertThat(sq.script().params().toString(), startsWith("[{a=MAX(int){a->")); } + + public void testTranslateInExpression_HavingClauseAndNullHandling_Painless() { + LogicalPlan p = plan("SELECT keyword, max(int) FROM test GROUP BY keyword HAVING max(int) in (10, null, 20, null, 30 - 10)"); + assertTrue(p instanceof Project); + assertTrue(p.children().get(0) instanceof Filter); + Expression condition = ((Filter) p.children().get(0)).condition(); + assertFalse(condition.foldable()); + QueryTranslation translation = QueryTranslator.toQuery(condition, false); + assertTrue(translation.query instanceof ScriptQuery); + ScriptQuery sq = (ScriptQuery) translation.query; + assertEquals("InternalSqlScriptUtils.nullSafeFilter(params.a0==10 || params.a0==20)", sq.script().toString()); + assertThat(sq.script().params().toString(), startsWith("[{a=MAX(int){a->")); + } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/tree/NodeSubclassTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/tree/NodeSubclassTests.java index 90fd739296081..4c763fa95cd26 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/tree/NodeSubclassTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/tree/NodeSubclassTests.java @@ -147,6 +147,7 @@ public void testTransform() throws Exception { /** * Test {@link Node#replaceChildren} implementation on {@link #subclass}. 
*/ + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/34775") public void testReplaceChildren() throws Exception { Constructor ctor = longestCtor(subclass); Object[] nodeCtorArgs = ctorArgs(ctor); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java index 30f9d82ff77af..5a612fdbe6117 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java @@ -183,6 +183,13 @@ public void testGeoField() { assertThat(dt.getDataType().esType, is("unsupported")); } + public void testIpField() { + Map mapping = loadMapping("mapping-ip.json"); + assertThat(mapping.size(), is(1)); + EsField dt = mapping.get("ip_addr"); + assertThat(dt.getDataType().esType, is("ip")); + } + public void testUnsupportedTypes() { Map mapping = loadMapping("mapping-unsupported.json"); EsField dt = mapping.get("range"); diff --git a/x-pack/plugin/sql/src/test/resources/mapping-ip.json b/x-pack/plugin/sql/src/test/resources/mapping-ip.json new file mode 100644 index 0000000000000..19211b82b0a7e --- /dev/null +++ b/x-pack/plugin/sql/src/test/resources/mapping-ip.json @@ -0,0 +1,7 @@ +{ + "properties" : { + "ip_addr" : { + "type" : "ip" + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/40_get_user_privs.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/40_get_user_privs.yml index eccd37565c724..2019d4586a7ab 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/40_get_user_privs.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/40_get_user_privs.yml @@ -202,6 +202,9 @@ teardown: --- "Test get_user_privileges for single role": + - skip: + reason: "contains is a newly added assertion" + features: contains - do: headers: { Authorization: "Basic 
dGVzdC0xOjEyMzQ1Njc4" } # test-1 xpack.security.get_user_privileges: {} @@ -261,6 +264,9 @@ teardown: --- "Test get_user_privileges for merged roles": + - skip: + reason: "contains is a newly added assertion" + features: contains - do: headers: { Authorization: "Basic dGVzdC0zOjEyMzQ1Njc4" } # test-3 xpack.security.get_user_privileges: {} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/delete_job.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/delete_job.yml index ebf953c93527a..861be094fa62d 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/delete_job.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/delete_job.yml @@ -62,10 +62,6 @@ setup: - "min" - "max" - "sum" - - field: "the_field" - metrics: - - "max" - - "min" timeout: "20s" stats: pages_processed: 0 @@ -113,10 +109,6 @@ setup: - "min" - "max" - "sum" - - field: "the_field" - metrics: - - "max" - - "min" timeout: "20s" stats: pages_processed: 0 @@ -164,10 +156,6 @@ setup: - "min" - "max" - "sum" - - field: "the_field" - metrics: - - "max" - - "min" timeout: "20s" stats: pages_processed: 0 diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml index 7af7f858f4f8c..759ddbad2b463 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml @@ -63,10 +63,6 @@ setup: - "min" - "max" - "sum" - - field: "the_field" - metrics: - - "max" - - "min" timeout: "20s" stats: pages_processed: 0 @@ -178,10 +174,6 @@ setup: - "min" - "max" - "sum" - - field: "the_field" - metrics: - - "max" - - "min" timeout: "20s" stats: pages_processed: 0 @@ -208,10 +200,6 @@ setup: - "min" - "max" - "sum" - - field: "the_field" - metrics: - - "max" - - "min" timeout: "20s" stats: pages_processed: 0 diff --git 
a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_caps.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_caps.yml index 145953e24906e..f8bb401a7721e 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_caps.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_caps.yml @@ -77,8 +77,6 @@ setup: - agg: "date_histogram" interval: "1h" time_zone: "UTC" - - agg: "max" - - agg: "min" value_field: - agg: "min" - agg: "max" @@ -126,8 +124,6 @@ setup: - agg: "date_histogram" interval: "1h" time_zone: "UTC" - - agg: "max" - - agg: "min" value_field: - agg: "min" - agg: "max" @@ -140,8 +136,6 @@ setup: - agg: "date_histogram" interval: "1h" time_zone: "UTC" - - agg: "max" - - agg: "min" value_field: - agg: "min" - agg: "max" @@ -215,8 +209,6 @@ setup: - agg: "date_histogram" interval: "1h" time_zone: "UTC" - - agg: "max" - - agg: "min" value_field: - agg: "min" - agg: "max" @@ -229,8 +221,6 @@ setup: - agg: "date_histogram" interval: "1h" time_zone: "UTC" - - agg: "max" - - agg: "min" value_field: - agg: "min" - agg: "max" @@ -246,8 +236,6 @@ setup: - agg: "date_histogram" interval: "1h" time_zone: "UTC" - - agg: "max" - - agg: "min" value_field: - agg: "min" - agg: "max" diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_index_caps.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_index_caps.yml index 751b0b2b89a64..bd49f2c338906 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_index_caps.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_index_caps.yml @@ -77,8 +77,6 @@ setup: - agg: "date_histogram" interval: "1h" time_zone: "UTC" - - agg: "max" - - agg: "min" value_field: - agg: "min" - agg: "max" @@ -126,8 +124,6 @@ setup: - agg: "date_histogram" interval: "1h" time_zone: "UTC" - - agg: "max" - - agg: "min" value_field: - agg: "min" - agg: "max" @@ 
-140,8 +136,6 @@ setup: - agg: "date_histogram" interval: "1h" time_zone: "UTC" - - agg: "max" - - agg: "min" value_field: - agg: "min" - agg: "max" @@ -190,8 +184,6 @@ setup: - agg: "date_histogram" interval: "1h" time_zone: "UTC" - - agg: "max" - - agg: "min" value_field: - agg: "min" - agg: "max" @@ -265,8 +257,6 @@ setup: - agg: "date_histogram" interval: "1h" time_zone: "UTC" - - agg: "max" - - agg: "min" value_field: - agg: "min" - agg: "max" @@ -279,8 +269,6 @@ setup: - agg: "date_histogram" interval: "1h" time_zone: "UTC" - - agg: "max" - - agg: "min" value_field: - agg: "min" - agg: "max" @@ -295,8 +283,6 @@ setup: - agg: "date_histogram" interval: "1h" time_zone: "UTC" - - agg: "max" - - agg: "min" value_field: - agg: "min" - agg: "max" @@ -374,8 +360,6 @@ setup: - agg: "date_histogram" interval: "1h" time_zone: "UTC" - - agg: "max" - - agg: "min" value_field: - agg: "min" - agg: "max" @@ -388,8 +372,6 @@ setup: - agg: "date_histogram" interval: "1h" time_zone: "UTC" - - agg: "max" - - agg: "min" value_field: - agg: "min" - agg: "max" @@ -404,8 +386,6 @@ setup: - agg: "date_histogram" interval: "1h" time_zone: "UTC" - - agg: "max" - - agg: "min" value_field: - agg: "min" - agg: "max" @@ -479,8 +459,6 @@ setup: - agg: "date_histogram" interval: "1h" time_zone: "UTC" - - agg: "max" - - agg: "min" value_field: - agg: "min" - agg: "max" diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml index 483be951e8aed..cbb6f8956b14f 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml @@ -63,10 +63,6 @@ setup: - "min" - "max" - "sum" - - field: "the_field" - metrics: - - "max" - - "min" timeout: "20s" stats: pages_processed: 0 diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/security_tests.yml 
b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/security_tests.yml index 650983b5cff7c..57bfd821ea24d 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/security_tests.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/security_tests.yml @@ -173,8 +173,6 @@ teardown: hits.hits.0._source: timestamp.date_histogram.time_zone: "UTC" timestamp.date_histogram.timestamp: 0 - timestamp.max.value: 123.0 - timestamp.min.value: 123.0 value_field.max.value: 1232.0 _rollup.version: 2 timestamp.date_histogram.interval: "1s" @@ -336,8 +334,6 @@ teardown: hits.hits.0._source: timestamp.date_histogram.time_zone: "UTC" timestamp.date_histogram.timestamp: 0 - timestamp.max.value: 123.0 - timestamp.min.value: 123.0 value_field.max.value: 1232.0 _rollup.version: 2 timestamp.date_histogram.interval: "1s" diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/10_basic.yml index 2aea0126e9e47..47e1a1160b580 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/10_basic.yml @@ -1,6 +1,9 @@ # Integration tests for monitoring # "X-Pack loaded": + - skip: + reason: "contains is a newly added assertion" + features: contains - do: cluster.state: {} diff --git a/x-pack/plugin/upgrade/build.gradle b/x-pack/plugin/upgrade/build.gradle index 56ce274dd1166..309962fa487ff 100644 --- a/x-pack/plugin/upgrade/build.gradle +++ b/x-pack/plugin/upgrade/build.gradle @@ -1,5 +1,4 @@ import com.carrotsearch.gradle.junit4.RandomizedTestingTask -import org.elasticsearch.gradle.BuildPlugin evaluationDependsOn(xpackModule('core')) @@ -34,7 +33,6 @@ task internalClusterTest(type: RandomizedTestingTask, group: JavaBasePlugin.VERIFICATION_GROUP, description: 'Multi-node tests', dependsOn: test.dependsOn) { - configure(BuildPlugin.commonTestConfig(project)) classpath = 
project.test.classpath testClassesDirs = project.test.testClassesDirs include '**/*IT.class' diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java index 14e96678d1488..f87d9454f2d77 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java @@ -50,7 +50,6 @@ import org.elasticsearch.rest.RestHandler; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptService; -import org.elasticsearch.script.SearchScript; import org.elasticsearch.script.TemplateScript; import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.FixedExecutorBuilder; @@ -223,9 +222,6 @@ public class Watcher extends Plugin implements ActionPlugin, ScriptPlugin, Reloa Setting.byteSizeSetting("xpack.watcher.bulk.size", new ByteSizeValue(1, ByteSizeUnit.MB), new ByteSizeValue(1, ByteSizeUnit.MB), new ByteSizeValue(10, ByteSizeUnit.MB), NodeScope); - - public static final ScriptContext SCRIPT_SEARCH_CONTEXT = - new ScriptContext<>("xpack", SearchScript.Factory.class); public static final ScriptContext SCRIPT_TEMPLATE_CONTEXT = new ScriptContext<>("xpack_template", TemplateScript.Factory.class); @@ -670,8 +666,7 @@ public List getBootstrapChecks() { @Override public List> getContexts() { - return Arrays.asList(Watcher.SCRIPT_SEARCH_CONTEXT, WatcherTransformScript.CONTEXT, - WatcherConditionScript.CONTEXT, Watcher.SCRIPT_TEMPLATE_CONTEXT); + return Arrays.asList(WatcherTransformScript.CONTEXT, WatcherConditionScript.CONTEXT, Watcher.SCRIPT_TEMPLATE_CONTEXT); } @Override diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/WatcherMockScriptPlugin.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/WatcherMockScriptPlugin.java index 
2908dbaa6cc14..5a03a3c0e6433 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/WatcherMockScriptPlugin.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/WatcherMockScriptPlugin.java @@ -56,7 +56,7 @@ public Object execute() { } public static final List> CONTEXTS = Collections.unmodifiableList(Arrays.asList( - WatcherConditionScript.CONTEXT, WatcherTransformScript.CONTEXT, Watcher.SCRIPT_TEMPLATE_CONTEXT, Watcher.SCRIPT_SEARCH_CONTEXT + WatcherConditionScript.CONTEXT, WatcherTransformScript.CONTEXT, Watcher.SCRIPT_TEMPLATE_CONTEXT )); @Override diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SearchInputTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SearchInputTests.java index 45b85caacc03a..5c0562c0a0075 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SearchInputTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SearchInputTests.java @@ -76,7 +76,6 @@ public void setup() { engines.put(MockMustacheScriptEngine.NAME, new MockMustacheScriptEngine()); Map> contexts = new HashMap<>(); contexts.put(Watcher.SCRIPT_TEMPLATE_CONTEXT.name, Watcher.SCRIPT_TEMPLATE_CONTEXT); - contexts.put(Watcher.SCRIPT_SEARCH_CONTEXT.name, Watcher.SCRIPT_SEARCH_CONTEXT); contexts.put(WatcherTransformScript.CONTEXT.name, WatcherTransformScript.CONTEXT); scriptService = new ScriptService(Settings.EMPTY, engines, contexts); diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index 8a6944fb87037..c112709bbe04b 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ 
b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -414,6 +414,7 @@ public void testRollupIDSchemeAfterRestart() throws Exception { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/34774") public void testSqlFailsOnIndexWithTwoTypes() throws IOException { // TODO this isn't going to trigger until we backport to 6.1 assumeTrue("It is only possible to build an index that sql doesn't like before 6.0.0", diff --git a/x-pack/qa/reindex-tests-with-security/src/test/java/org/elasticsearch/xpack/security/ReindexWithSecurityIT.java b/x-pack/qa/reindex-tests-with-security/src/test/java/org/elasticsearch/xpack/security/ReindexWithSecurityIT.java index 48e1a46c3ab57..0a75565fbc075 100644 --- a/x-pack/qa/reindex-tests-with-security/src/test/java/org/elasticsearch/xpack/security/ReindexWithSecurityIT.java +++ b/x-pack/qa/reindex-tests-with-security/src/test/java/org/elasticsearch/xpack/security/ReindexWithSecurityIT.java @@ -60,7 +60,7 @@ public void testDeleteByQuery() { .source("test1", "index1") .filter(QueryBuilders.matchAllQuery()) .get()); - assertEquals("no such index", e.getMessage()); + assertEquals("no such index [index1]", e.getMessage()); } public void testUpdateByQuery() { @@ -75,7 +75,7 @@ public void testUpdateByQuery() { IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> new UpdateByQueryRequestBuilder(client(), UpdateByQueryAction.INSTANCE).source("test1", "index1").get()); - assertEquals("no such index", e.getMessage()); + assertEquals("no such index [index1]", e.getMessage()); } public void testReindex() { @@ -90,6 +90,6 @@ public void testReindex() { IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> new ReindexRequestBuilder(client(), ReindexAction.INSTANCE).source("test1", "index1").destination("dest").get()); - assertEquals("no such index", e.getMessage()); + assertEquals("no such index [index1]", e.getMessage()); } } diff 
--git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java index 3448117cd2c88..20e7bf07e0f65 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java @@ -7,12 +7,18 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import java.io.IOException; import java.nio.charset.StandardCharsets; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.equalTo; + /** * Basic test that indexed documents survive the rolling restart. *

@@ -45,6 +51,26 @@ public void testIndexing() throws IOException { } if (CLUSTER_TYPE == ClusterType.OLD) { + { + Version minimumIndexCompatibilityVersion = Version.CURRENT.minimumIndexCompatibilityVersion(); + assertThat("this branch is not needed if we aren't compatible with 6.0", + minimumIndexCompatibilityVersion.onOrBefore(Version.V_6_0_0), equalTo(true)); + if (minimumIndexCompatibilityVersion.before(Version.V_7_0_0_alpha1)) { + XContentBuilder template = jsonBuilder(); + template.startObject(); + { + template.field("index_patterns", "*"); + template.startObject("settings"); + template.field("number_of_shards", 5); + template.endObject(); + } + template.endObject(); + Request createTemplate = new Request("PUT", "/_template/template"); + createTemplate.setJsonEntity(Strings.toString(template)); + client().performRequest(createTemplate); + } + } + Request createTestIndex = new Request("PUT", "/test_index"); createTestIndex.setJsonEntity("{\"settings\": {\"index.number_of_replicas\": 0}}"); client().performRequest(createTestIndex); diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java index 24965efc62116..6afecfc2f28da 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java @@ -13,6 +13,8 @@ import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.test.rest.yaml.ObjectPath; import java.io.IOException; @@ -20,10 +22,33 @@ import java.util.List; import java.util.Map; +import static 
org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.equalTo; + public class TokenBackwardsCompatibilityIT extends AbstractUpgradeTestCase { public void testGeneratingTokenInOldCluster() throws Exception { assumeTrue("this test should only run against the old cluster", CLUSTER_TYPE == ClusterType.OLD); + { + Version minimumIndexCompatibilityVersion = Version.CURRENT.minimumIndexCompatibilityVersion(); + assertThat("this branch is not needed if we aren't compatible with 6.0", + minimumIndexCompatibilityVersion.onOrBefore(Version.V_6_0_0), equalTo(true)); + if (minimumIndexCompatibilityVersion.before(Version.V_7_0_0_alpha1)) { + XContentBuilder template = jsonBuilder(); + template.startObject(); + { + template.field("index_patterns", "*"); + template.startObject("settings"); + template.field("number_of_shards", 5); + template.endObject(); + } + template.endObject(); + Request createTemplate = new Request("PUT", "/_template/template"); + createTemplate.setJsonEntity(Strings.toString(template)); + client().performRequest(createTemplate); + } + } + Request createTokenRequest = new Request("POST", "_xpack/security/oauth2/token"); createTokenRequest.setJsonEntity( "{\n" + diff --git a/x-pack/qa/security-setup-password-tests/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordToolIT.java b/x-pack/qa/security-setup-password-tests/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordToolIT.java index 860c30c0ddd55..974f67825f7b5 100644 --- a/x-pack/qa/security-setup-password-tests/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordToolIT.java +++ b/x-pack/qa/security-setup-password-tests/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordToolIT.java @@ -98,7 +98,7 @@ public void testSetupPasswordToolAutoSetup() throws Exception { } }); - assertEquals(5, userPasswordMap.size()); + assertEquals(6, 
userPasswordMap.size()); userPasswordMap.entrySet().forEach(entry -> { final String basicHeader = "Basic " + Base64.getEncoder().encodeToString((entry.getKey() + ":" + entry.getValue()).getBytes(StandardCharsets.UTF_8)); diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DataLoader.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DataLoader.java index 354c44a60eee2..8105cbf9a5618 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DataLoader.java +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DataLoader.java @@ -8,6 +8,7 @@ import org.apache.http.HttpHost; import org.apache.logging.log4j.LogManager; import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.Strings; @@ -44,6 +45,7 @@ protected static void loadDatasetIntoEs(RestClient client) throws Exception { protected static void loadEmpDatasetIntoEs(RestClient client) throws Exception { loadEmpDatasetIntoEs(client, "test_emp", "employees"); loadEmpDatasetWithExtraIntoEs(client, "test_emp_copy", "employees"); + loadLogsDatasetIntoEs(client, "logs", "logs"); makeAlias(client, "test_alias", "test_emp", "test_emp_copy"); makeAlias(client, "test_alias_emp", "test_emp", "test_emp_copy"); } @@ -150,7 +152,7 @@ private static void loadEmpDatasetIntoEs(RestClient client, String index, String list.add(dep); }); - request = new Request("POST", "/" + index + "/emp/_bulk"); + request = new Request("POST", "/" + index + "/emp/_bulk?refresh=wait_for"); request.addParameter("refresh", "true"); StringBuilder bulk = new StringBuilder(); csvToLines(fileName, (titles, fields) -> { @@ -193,6 +195,58 @@ private static void loadEmpDatasetIntoEs(RestClient client, String index, String client.performRequest(request); } + protected static void loadLogsDatasetIntoEs(RestClient client, String 
index, String filename) throws Exception { + Request request = new Request("PUT", "/" + index); + XContentBuilder createIndex = JsonXContent.contentBuilder().startObject(); + createIndex.startObject("settings"); + { + createIndex.field("number_of_shards", 1); + createIndex.field("number_of_replicas", 1); + } + createIndex.endObject(); + createIndex.startObject("mappings"); + { + createIndex.startObject("_doc"); + { + createIndex.startObject("properties"); + { + createIndex.startObject("id").field("type", "integer").endObject(); + createIndex.startObject("@timestamp").field("type", "date").endObject(); + createIndex.startObject("bytes_in").field("type", "integer").endObject(); + createIndex.startObject("bytes_out").field("type", "integer").endObject(); + createIndex.startObject("client_ip").field("type", "ip").endObject(); + createIndex.startObject("client_port").field("type", "integer").endObject(); + createIndex.startObject("dest_ip").field("type", "ip").endObject(); + createIndex.startObject("status").field("type", "keyword").endObject(); + } + createIndex.endObject(); + } + createIndex.endObject(); + } + createIndex.endObject().endObject(); + request.setJsonEntity(Strings.toString(createIndex)); + client.performRequest(request); + + request = new Request("POST", "/" + index + "/_doc/_bulk?refresh=wait_for"); + request.addParameter("refresh", "true"); + StringBuilder bulk = new StringBuilder(); + csvToLines(filename, (titles, fields) -> { + bulk.append("{\"index\":{\"_id\":\"" + fields.get(0) + "\"}}\n"); + bulk.append("{"); + for (int f = 0; f < titles.size(); f++) { + if (Strings.hasText(fields.get(f))) { + if (f > 0) { + bulk.append(","); + } + bulk.append('"').append(titles.get(f)).append("\":\"").append(fields.get(f)).append('"'); + } + } + bulk.append("}\n"); + }); + request.setJsonEntity(bulk.toString()); + Response response = client.performRequest(request); + } + protected static void loadLibDatasetIntoEs(RestClient client, String index) throws Exception 
{ Request request = new Request("PUT", "/" + index); XContentBuilder createIndex = JsonXContent.contentBuilder().startObject(); @@ -221,7 +275,7 @@ protected static void loadLibDatasetIntoEs(RestClient client, String index) thro request.setJsonEntity(Strings.toString(createIndex)); client.performRequest(request); - request = new Request("POST", "/" + index + "/book/_bulk"); + request = new Request("POST", "/" + index + "/book/_bulk?refresh=wait_for"); request.addParameter("refresh", "true"); StringBuilder bulk = new StringBuilder(); csvToLines("library", (titles, fields) -> { @@ -236,7 +290,7 @@ protected static void loadLibDatasetIntoEs(RestClient client, String index) thro bulk.append("}\n"); }); request.setJsonEntity(bulk.toString()); - client.performRequest(request); + Response response = client.performRequest(request); } protected static void makeAlias(RestClient client, String aliasName, String... indices) throws Exception { @@ -270,4 +324,4 @@ private static void csvToLines(String name, CheckedBiConsumer, List public static InputStream readFromJarUrl(URL source) throws IOException { return source.openStream(); } -} +} \ No newline at end of file diff --git a/x-pack/qa/sql/src/main/resources/agg.sql-spec b/x-pack/qa/sql/src/main/resources/agg.sql-spec index 2c6248059f5fb..dab4c386a55ba 100644 --- a/x-pack/qa/sql/src/main/resources/agg.sql-spec +++ b/x-pack/qa/sql/src/main/resources/agg.sql-spec @@ -35,6 +35,8 @@ groupByOnNumberWithWhereAndLimit SELECT emp_no e FROM "test_emp" WHERE emp_no < 10020 GROUP BY emp_no ORDER BY emp_no DESC LIMIT 1; groupByOnNumberOnAlias SELECT emp_no e FROM "test_emp" WHERE emp_no < 10020 GROUP BY e ORDER BY emp_no DESC; +groupByOnNumberWithAliasInSelect +SELECT emp_no e FROM "test_emp" WHERE emp_no < 10020 GROUP BY emp_no ORDER BY e DESC; // group by scalar groupByAddScalar @@ -433,6 +435,10 @@ aggMultiGroupByMultiWithHavingUsingIn SELECT MIN(salary) min, MAX(salary) max, gender g, languages l, COUNT(*) c FROM "test_emp" WHERE 
languages > 0 GROUP BY g, languages HAVING max IN (74500, 74600) ORDER BY gender, languages; +// HAVING filter resulting in NoMatch +aggWithNoMatchHaving +SELECT gender g, COUNT(*) c FROM "test_emp" GROUP BY g HAVING 1 > 2 ORDER BY gender; + // // NULL tests // @@ -450,3 +456,9 @@ selectHireDateGroupByHireDate SELECT hire_date HD, COUNT(*) c FROM test_emp GROUP BY hire_date ORDER BY hire_date DESC; selectSalaryGroupBySalary SELECT salary, COUNT(*) c FROM test_emp GROUP BY salary ORDER BY salary DESC; + +// filter with IN +aggMultiWithHavingUsingInAndNullHandling +SELECT MIN(salary) min, MAX(salary) max, gender g, COUNT(*) c FROM "test_emp" WHERE languages > 0 GROUP BY g HAVING max IN(74999, null, 74600) ORDER BY gender; +aggMultiGroupByMultiWithHavingUsingInAndNullHandling +SELECT MIN(salary) min, MAX(salary) max, gender g, languages l, COUNT(*) c FROM "test_emp" WHERE languages > 0 GROUP BY g, languages HAVING max IN (74500, null, 74600) ORDER BY gender, languages; diff --git a/x-pack/qa/sql/src/main/resources/command.csv-spec b/x-pack/qa/sql/src/main/resources/command.csv-spec index 06f38f0a07e47..cc71dd947129a 100644 --- a/x-pack/qa/sql/src/main/resources/command.csv-spec +++ b/x-pack/qa/sql/src/main/resources/command.csv-spec @@ -152,6 +152,7 @@ showTables SHOW TABLES; name | type +logs |BASE TABLE test_alias |ALIAS test_alias_emp |ALIAS test_emp |BASE TABLE diff --git a/x-pack/qa/sql/src/main/resources/filter.sql-spec b/x-pack/qa/sql/src/main/resources/filter.sql-spec index 1a564ecb9ad82..c4ddbf66e0d4d 100644 --- a/x-pack/qa/sql/src/main/resources/filter.sql-spec +++ b/x-pack/qa/sql/src/main/resources/filter.sql-spec @@ -79,6 +79,9 @@ SELECT last_name l FROM "test_emp" WHERE emp_no BETWEEN 9990 AND 10003 ORDER BY whereNotBetween SELECT last_name l FROM "test_emp" WHERE emp_no NOT BETWEEN 10010 AND 10020 ORDER BY emp_no LIMIT 5; +whereNoMatch +SELECT last_name l FROM "test_emp" WHERE 1 = 2 ORDER BY 1 LIMIT 10; + // // IN expression // @@ -96,3 +99,8 @@ SELECT 
last_name l FROM "test_emp" WHERE emp_no NOT IN (10000, 10001, 10002, 999 whereWithInAndComplexFunctions SELECT last_name l FROM "test_emp" WHERE emp_no NOT IN (10000, abs(2 - 10003), 10002, 999) AND lcase(first_name) IN ('sumant', 'mary', 'patricio', 'No''Match') ORDER BY emp_no LIMIT 5; + +whereWithInAndNullHandling1 +SELECT last_name l FROM "test_emp" WHERE birth_date in (CAST('2018-10-01T00:00:00Z' AS TIMESTAMP), CAST('1959-10-01T00:00:00Z' AS TIMESTAMP)) AND (emp_no = 10038 OR emp_no = 10039 OR emp_no = 10040) ORDER BY emp_no; +whereWithInAndNullHandling2 +SELECT last_name l FROM "test_emp" WHERE birth_date in (CAST('2018-10-01T00:00:00Z' AS TIMESTAMP), null, CAST('1959-10-01T00:00:00Z' AS TIMESTAMP)) AND (emp_no = 10038 OR emp_no = 10039 OR emp_no = 10040) ORDER BY emp_no; diff --git a/x-pack/qa/sql/src/main/resources/ip.csv-spec b/x-pack/qa/sql/src/main/resources/ip.csv-spec new file mode 100644 index 0000000000000..e8075d57c05c6 --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/ip.csv-spec @@ -0,0 +1,198 @@ +// +// Tests for IP fields +// + +selectAll +SELECT * FROM logs ORDER BY id LIMIT 10; + + @timestamp | bytes_in | bytes_out | client_ip | client_port | dest_ip | id | status +------------------------+---------------+---------------+---------------+---------------+---------------+---------------+--------------- +2017-11-10T21:15:54Z|47 |388 |10.0.1.1 |9152 |172.27.1.129 |1 |OK +2017-11-10T21:15:39Z|29 |374 |10.0.1.1 |31693 |172.27.1.123 |2 |OK +2017-11-10T21:15:39Z|35 |303 |10.0.1.1 |23625 |172.27.1.113 |3 |OK +2017-11-10T21:15:39Z|36 |312 |10.0.1.1 |9932 |172.27.1.116 |4 |OK +2017-11-10T21:15:40Z|35 |344 |10.0.1.1 |22695 |172.27.1.149 |5 |OK +2017-11-10T21:15:40Z|31 |503 |10.0.1.1 |59811 |172.27.1.122 |6 |OK +2017-11-10T21:15:40Z|35 |458 |10.0.1.7 |57372 |172.27.1.140 |7 |OK +2017-11-10T21:15:41Z|35 |281 |10.0.1.8 |17370 |null |8 |OK +2017-11-10T21:15:41Z|46 |231 |10.0.1.9 |65004 |null |9 |OK +2017-11-10T20:36:07Z|40 |506 |10.0.1.10 |22661 |null |10 
|OK +; + +selectIpField +SELECT client_ip, dest_ip FROM logs ORDER BY id LIMIT 10; + + client_ip | dest_ip +---------------+--------------- +10.0.1.1 |172.27.1.129 +10.0.1.1 |172.27.1.123 +10.0.1.1 |172.27.1.113 +10.0.1.1 |172.27.1.116 +10.0.1.1 |172.27.1.149 +10.0.1.1 |172.27.1.122 +10.0.1.7 |172.27.1.140 +10.0.1.8 |null +10.0.1.9 |null +10.0.1.10 |null +; + +orderByIpv4Field +SELECT client_ip, dest_ip FROM logs ORDER BY client_ip LIMIT 5; + + client_ip | dest_ip +---------------+------------------------------ +10.0.0.105 |172.27.1.1 +10.0.0.107 |172.20.10.8 +10.0.0.109 |2001:cafe::470f:60b7:f84a:25b6 +10.0.0.113 |90.128.199.24 +10.0.0.118 |172.27.1.1 +; + +orderByIpv6Field +SELECT client_ip, dest_ip FROM logs ORDER BY dest_ip ASC LIMIT 5; + + client_ip:s | dest_ip:s +---------------+------------------------------ +null |27.58.6.220 +10.0.0.147 |90.128.199.24 +10.0.0.113 |90.128.199.24 +10.0.0.129 |172.16.1.1 +10.0.1.177 |172.20.10.1 +; + +filterExactMatchIpv4 +SELECT id, client_ip, dest_ip FROM logs WHERE client_ip = '10.0.1.166' ORDER BY id LIMIT 5; + + id | client_ip | dest_ip +---------------+---------------+------------------------------ +22 |10.0.1.166 |2001:cafe::ff07:bdcc:bc59:ff9f +24 |10.0.1.166 |2001:cafe::13e1:16fc:8726:1bf8 +29 |10.0.1.166 |2001:cafe::ff07:bdcc:bc59:ff9f +33 |10.0.1.166 |2001:cafe::ff07:bdcc:bc59:ff9f +34 |10.0.1.166 |2001:cafe::ff07:bdcc:bc59:ff9e +; + +filterExactMatchIpv6 +SELECT id, client_ip, dest_ip FROM logs WHERE dest_ip = 'fe80::86ba:3bff:fe05:c3f3' ORDER BY id LIMIT 10; + + id | client_ip | dest_ip +---------------+---------------+------------------------- +19 |10.0.1.13 |fe80::86ba:3bff:fe05:c3f3 +; + + +filterRangeIpv4 +SELECT id, client_ip, dest_ip FROM logs WHERE client_ip BETWEEN '10.0.1.1' AND '10.0.1.200' ORDER BY id LIMIT 10; + + id | client_ip | dest_ip +---------------+---------------+--------------- +1 |10.0.1.1 |172.27.1.129 +2 |10.0.1.1 |172.27.1.123 +3 |10.0.1.1 |172.27.1.113 +4 |10.0.1.1 |172.27.1.116 +5 
|10.0.1.1 |172.27.1.149 +6 |10.0.1.1 |172.27.1.122 +7 |10.0.1.7 |172.27.1.140 +8 |10.0.1.8 |null +9 |10.0.1.9 |null +10 |10.0.1.10 |null +; + +filterRangeCIDRIpv4 +SELECT id, client_ip, dest_ip FROM logs WHERE client_ip = '10.0.0.0/16' ORDER BY id LIMIT 5; + + id | client_ip | dest_ip +---------------+---------------+--------------- +1 |10.0.1.1 |172.27.1.129 +2 |10.0.1.1 |172.27.1.123 +3 |10.0.1.1 |172.27.1.113 +4 |10.0.1.1 |172.27.1.116 +5 |10.0.1.1 |172.27.1.149 +; + +filterRangeCIDRIpv6 +SELECT id, client_ip, dest_ip FROM logs WHERE dest_ip = '2001:cafe::/48' ORDER BY id LIMIT 5; + + id | client_ip | dest_ip +---------------+---------------+------------------------------ +20 |10.0.1.199 |2001:cafe::ff07:bdcc:bc59:ff9f +22 |10.0.1.166 |2001:cafe::ff07:bdcc:bc59:ff9f +23 |null |2001:cafe::d46a:9bdc:8126:b00b +24 |10.0.1.166 |2001:cafe::13e1:16fc:8726:1bf8 +25 |10.0.1.199 |2001:cafe::ff07:bdcc:bc59:ff9f +; + +// +// waiting on https://github.com/elastic/elasticsearch/issues/34799 +// +filterInCIDRIpv4-Ignore +SELECT id, client_ip, dest_ip FROM logs WHERE dest_ip IN ('10.0.1.1', '10.0.1.200', '10.0.0.0/16') ORDER BY id LIMIT 10; + + id | client_ip | dest_ip +---------------+---------------+------------------------------ +20 |10.0.1.199 |2001:cafe::ff07:bdcc:bc59:ff9f +22 |10.0.1.166 |2001:cafe::ff07:bdcc:bc59:ff9f +23 |10.0.1.199 |2001:cafe::d46a:9bdc:8126:b00b +24 |10.0.1.166 |2001:cafe::13e1:16fc:8726:1bf8 +; + + +filterInCIDRIpv6-Ignore +SELECT id, client_ip, dest_ip FROM logs WHERE dest_ip IN ('127.0.0.1', '2001:cafe::13e1:16fc:8726:1bf8', '2001:cafe::/48') ORDER BY id LIMIT 10; + + id | client_ip | dest_ip +---------------+---------------+------------------------------ +20 |10.0.1.199 |2001:cafe::ff07:bdcc:bc59:ff9f +22 |10.0.1.166 |2001:cafe::ff07:bdcc:bc59:ff9f +23 |10.0.1.199 |2001:cafe::d46a:9bdc:8126:b00b +24 |10.0.1.166 |2001:cafe::13e1:16fc:8726:1bf8 +; + +groupByIpv4 +SELECT client_ip FROM logs GROUP BY client_ip LIMIT 5; + + client_ip:s 
+--------------- +null +10.0.0.105 +10.0.0.107 +10.0.0.109 +10.0.0.113 +; + +groupByIpv6 +SELECT dest_ip FROM logs GROUP BY dest_ip ORDER BY dest_ip DESC LIMIT 5; + + dest_ip +------------------------------ +fe80::a65e:60ff:fee8:fee9 +fe80::86ba:3bff:fe05:c3f3 +2001:cafe::ff07:bdcc:bc59:ff9f +2001:cafe::ff07:bdcc:bc59:ff9e +2001:cafe::ff07:bdcc:bc59:ff9d +; + +groupByIpv4AndIpv6 +SELECT client_ip, dest_ip FROM logs GROUP BY client_ip, dest_ip ORDER BY dest_ip DESC LIMIT 5; + + client_ip | dest_ip +---------------+------------------------------ +10.0.1.222 |fe80::a65e:60ff:fee8:fee9 +10.0.1.13 |fe80::86ba:3bff:fe05:c3f3 +null |2001:cafe::ff07:bdcc:bc59:ff9f +10.0.1.166 |2001:cafe::ff07:bdcc:bc59:ff9f +10.0.1.199 |2001:cafe::ff07:bdcc:bc59:ff9f +; + + +groupByIpv4AndPort +SELECT client_ip, client_port FROM logs GROUP BY client_ip, client_port ORDER BY client_port DESC LIMIT 5; + + client_ip | client_port +---------------+--------------- +10.0.1.9 |65004 +10.0.0.129 |63982 +null |63238 +null |61337 +null |61220 +; diff --git a/x-pack/qa/sql/src/main/resources/logs.csv b/x-pack/qa/sql/src/main/resources/logs.csv new file mode 100644 index 0000000000000..240fb3752ab53 --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/logs.csv @@ -0,0 +1,101 @@ +id,@timestamp,bytes_in,bytes_out,client_ip,client_port,dest_ip,status +1,2017-11-10T21:15:54Z,47,388,10.0.1.1,9152,172.27.1.129,OK +2,2017-11-10T21:15:39Z,29,374,10.0.1.1,31693,172.27.1.123,OK +3,2017-11-10T21:15:39Z,35,303,10.0.1.1,23625,172.27.1.113,OK +4,2017-11-10T21:15:39Z,36,312,10.0.1.1,9932,172.27.1.116,OK +5,2017-11-10T21:15:40Z,35,344,10.0.1.1,22695,172.27.1.149,OK +6,2017-11-10T21:15:40Z,31,503,10.0.1.1,59811,172.27.1.122,OK +7,2017-11-10T21:15:40Z,35,458,10.0.1.7,57372,172.27.1.140,OK +8,2017-11-10T21:15:41Z,35,281,10.0.1.8,17370,,OK +9,2017-11-10T21:15:41Z,46,231,10.0.1.9,65004,,OK +10,2017-11-10T20:36:07Z,40,506,10.0.1.10,22661,,OK +11,2017-11-10T20:36:08Z,34,471,10.0.1.11,16752,172.27.1.131,OK 
+12,2017-11-10T20:36:07Z,39,503,10.0.1.12,19479,172.27.1.103,OK +13,2017-11-10T20:36:07Z,29,502,10.0.1.13,2326,172.27.1.139,OK +14,2017-11-10T20:36:15Z,35,280,10.0.1.13,51758,172.27.1.129,OK +15,2017-11-10T20:36:15Z,38,225,,22994,172.27.1.139,OK +16,2017-11-10T20:35:54Z,35,326,,5505,172.27.1.120,OK +17,2017-11-10T20:35:54Z,46,466,10.0.1.13,3666,172.27.1.103,OK +18,2017-11-10T20:35:55Z,42,238,10.0.1.13,23791,172.27.1.111,OK +19,2017-11-10T17:54:43Z,16,,10.0.1.13,,fe80::86ba:3bff:fe05:c3f3,OK +20,2017-11-10T23:23:24Z,40,,10.0.1.199,,2001:cafe::ff07:bdcc:bc59:ff9f,OK +21,2017-11-10T17:54:59Z,24,,10.0.1.222,,fe80::a65e:60ff:fee8:fee9,OK +22,2017-11-10T21:13:27Z,20,,10.0.1.166,,2001:cafe::ff07:bdcc:bc59:ff9f,OK +23,2017-11-10T22:37:41Z,24,,,,2001:cafe::d46a:9bdc:8126:b00b,OK +24,2017-11-10T20:34:43Z,8,,10.0.1.166,,2001:cafe::13e1:16fc:8726:1bf8,OK +25,2017-11-10T23:30:46Z,40,,10.0.1.199,,2001:cafe::ff07:bdcc:bc59:ff9f,OK +26,2017-11-10T21:13:16Z,20,,,,2001:cafe::ff07:bdcc:bc59:ff9f,OK +27,2017-11-10T23:36:32Z,0,,10.0.1.199,,2001:cafe::13e1:16fc:8726:1bf8,OK +28,2017-11-10T23:36:33Z,40,,10.0.1.199,,2001:cafe::ff07:bdcc:bc59:ff9f,OK +29,2017-11-10T20:35:26Z,20,,10.0.1.166,,2001:cafe::ff07:bdcc:bc59:ff9f,OK +30,2017-11-10T23:36:41Z,8,,,,2001:cafe::13e1:16fc:8726:1bf8,OK +31,2017-11-10T23:56:36Z,8,,10.0.1.199,,2001:cafe::13e1:16fc:8726:1bf8,OK +32,2017-11-10T20:29:25Z,32,,10.0.1.177,59769,172.20.10.1,Error +33,2017-11-10T21:35:01Z,20,,10.0.1.166,,2001:cafe::ff07:bdcc:bc59:ff9f,OK +34,2017-11-10T21:12:17Z,20,,10.0.1.166,,2001:cafe::ff07:bdcc:bc59:ff9e,OK +35,2017-11-10T23:17:14Z,40,,10.0.1.199,,2001:cafe::ff07:bdcc:bc59:ff9d,OK +36,2017-11-10T23:28:11Z,8,,10.0.1.199,,2001:cafe::13e1:16fc:8726:1bf8,OK +37,2017-11-10T22:36:27Z,8,,10.0.1.199,,2001:cafe::13e1:16fc:8726:1bf8,OK +38,2017-11-10T20:35:55Z,36,281,,58533,172.27.1.1,OK +39,2017-11-10T20:35:55Z,25,273,,39211,,OK +40,2017-11-10T20:35:55Z,34,253,,37971,172.27.1.1,OK +41,2017-11-10T20:35:55Z,41,503,,47831,172.27.1.1,OK 
+42,2017-11-10T21:34:49Z,28,,,,27.58.6.220,Error +43,2017-11-10T20:35:55Z,28,206,10.0.1.200,31000,172.27.1.1,OK +44,2017-11-10T20:14:04Z,8,,10.0.1.201,,2001:cafe::13e1:16fc:8726:1bf8,OK +45,2017-11-10T19:38:06Z,37,239,10.0.1.202,3577,172.27.1.1,OK +46,2017-11-10T21:14:18Z,8,,10.0.1.203,,2001:cafe::13e1:16fc:8726:1bf8,OK +47,2017-11-10T20:35:56Z,34,202,10.0.1.204,49112,172.27.1.1,OK +48,2017-11-10T20:53:05Z,8,,10.0.1.205,,2001:cafe::13e1:16fc:8726:1bf8,OK +49,2017-11-10T21:25:42Z,8,,10.0.1.206,,2001:cafe::13e1:16fc:8726:1bf9,OK +50,2017-11-10T21:14:44Z,8,,10.0.1.207,,2001:cafe::13e1:16fc:8726:1bf4,OK +51,2017-11-10T21:28:34Z,8,,10.0.1.208,,2001:cafe::13e1:16fc:8726:1bf3,OK +52,2017-11-10T20:35:55Z,34,227,,63238,172.27.1.1,OK +53,2017-11-10T20:15:24Z,8,,,,2001:cafe::13e1:16fc:8726:1bf8,OK +54,2017-11-10T20:35:57Z,37,239,,61337,172.27.1.1,OK +55,2017-11-10T17:14:10Z,16,,10.0.1.222,,2001:cafe::a98d:374:79e4:4865,OK +56,2017-11-10T20:35:57Z,38,476,10.0.1.200,53720,172.27.1.1,OK +57,2017-11-10T23:22:13Z,8,,10.0.1.201,,2001:cafe::13e1:16fc:8726:1bf8,OK +58,2017-11-10T20:32:57Z,8,,10.0.1.202,,2001:cafe::13e1:16fc:8726:1bf7,OK +59,2017-11-10T21:24:00Z,8,,10.0.1.203,,2001:cafe::13e1:16fc:8726:1bf6,OK +60,2017-11-10T20:35:56Z,32,503,10.0.1.204,19382,172.27.1.1,OK +61,2017-11-10T23:43:10Z,0,,10.0.1.205,,2001:cafe::13e1:16fc:8726:1bf8,OK +62,2017-11-10T20:35:57Z,30,169,10.0.1.206,47532,172.27.1.1,OK +63,2017-11-10T20:21:58Z,20,,10.0.1.207,,2001:cafe::ff07:bdcc:bc59:ff9f,OK +64,2017-11-10T20:35:57Z,41,271,10.0.1.208,16227,,OK +65,2017-11-10T20:33:06Z,28,,10.0.1.166,,172.27.1.1,Error +66,2017-11-10T20:35:57Z,33,185,,28928,172.27.1.1,OK +67,2017-11-10T20:26:21Z,20,,,,2001:cafe::ff07:bdcc:bc59:ff9f,OK +68,2017-11-10T21:23:25Z,20,,,,2001:cafe::ff07:bdcc:bc59:ff9f,OK +69,2017-11-10T21:23:54Z,8,,10.0.1.166,,2001:cafe::13e1:16fc:8726:1bf8,OK +70,2017-11-10T20:35:57Z,35,234,10.0.1.166,54994,172.27.1.1,OK +71,2017-11-10T00:27:03Z,48,,10.0.1.122,,2001:cafe::470f:60b7:f84a:25b6,OK 
+72,2017-11-10T00:27:46Z,48,,10.0.1.122,,2001:cafe::470f:60b7:f84a:25b6,OK +73,2017-11-10T20:35:58Z,35,223,,20163,172.27.1.1,OK +74,2017-11-10T20:35:57Z,32,501,10.0.1.166,51275,172.27.1.1,OK +75,2017-11-10T22:27:09Z,20,,10.0.1.199,,2001:cafe::ff07:bdcc:bc59:ff9f,OK +76,2017-11-10T20:35:58Z,45,493,10.0.1.166,1193,172.27.1.1,OK +77,2017-11-10T22:26:44Z,20,,10.0.1.199,,2001:cafe::ff07:bdcc:bc59:ff9f,OK +78,2017-11-10T22:27:31Z,8,,10.0.1.199,,2001:cafe::13e1:16fc:8726:1bf8,OK +79,2017-11-10T20:35:52Z,47,246,,24564,172.27.1.1,OK +80,2017-11-10T00:00:22Z,48,,10.0.1.122,,2001:cafe::470f:60b7:f84a:25b6,OK +81,2017-11-10T20:35:52Z,37,420,10.0.1.166,40542,172.27.1.1,OK +82,2017-11-10T00:01:20Z,48,,10.0.1.122,,2001:cafe::470f:60b7:f84a:25b6,OK +83,2017-11-10T00:01:04Z,48,,10.0.1.122,,2001:cafe::470f:60b7:f84a:25b6,OK +84,2017-11-10T00:32:48Z,48,,10.0.1.122,,2001:cafe::470f:60b7:f84a:25b5,OK +85,2017-11-10T00:01:45Z,48,,10.0.1.122,,2001:cafe::470f:60b7:f84a:25b4,OK +86,2017-11-10T20:36:08Z,38,509,,61220,172.27.1.1,OK +87,2017-11-10T21:17:37Z,38,226,10.0.0.144,26602,,OK +88,2017-11-10T20:06:49Z,30,,10.0.0.147,53240,90.128.199.24,Error +89,2017-11-10T21:17:37Z,44,284,10.0.0.118,49479,172.27.1.1,OK +90,2017-11-10T19:51:38Z,28,,10.0.0.130,,203.131.98.151,Error +91,2017-11-10T19:51:38Z,28,,10.0.0.107,,172.20.10.8,Error +92,2017-11-10T20:06:50Z,34,215,10.0.0.113,25162,90.128.199.24,OK +93,2017-11-10T21:17:46Z,33,185,10.0.0.129,63982,172.27.1.1,OK +94,2017-11-10T19:51:38Z,28,,10.0.0.130,,203.131.98.151,Error +95,2017-11-10T21:17:46Z,28,321,10.0.0.105,4292,172.27.1.1,OK +96,2017-11-10T00:04:50Z,48,,10.0.0.109,,2001:cafe::470f:60b7:f84a:25b6,OK +97,2017-11-10T21:17:48Z,30,280,10.0.0.145,57783,172.27.1.1,OK +98,2017-11-10T21:12:24Z,74,90,10.0.0.134,57203,172.20.10.1,OK +99,2017-11-10T21:17:37Z,39,512,10.0.0.128,29333,,OK +100,2017-11-10T03:21:36Z,64,183,10.0.0.129,4541,172.16.1.1,OK diff --git a/x-pack/qa/sql/src/main/resources/select.csv-spec 
b/x-pack/qa/sql/src/main/resources/select.csv-spec index b3888abd47bf3..bf208c62026df 100644 --- a/x-pack/qa/sql/src/main/resources/select.csv-spec +++ b/x-pack/qa/sql/src/main/resources/select.csv-spec @@ -25,6 +25,22 @@ false |true ; +inWithNullHandling +SELECT 2 IN (1, null, 3), 3 IN (1, null, 3), null IN (1, null, 3), null IN (1, 2, 3); + + 2 IN (1, null, 3) | 3 IN (1, null, 3) | null IN (1, null, 3) | null IN (1, 2, 3) +--------------------+--------------------+-----------------------+------------------- +null |true |null | null +; + +inWithNullHandlingAndNegation +SELECT NOT 2 IN (1, null, 3), NOT 3 IN (1, null, 3), NOT null IN (1, null, 3), NOT null IN (1, 2, 3); + + NOT 2 IN (1, null, 3) | NOT 3 IN (1, null, 3) | NOT null IN (1, null, 3) | NOT null IN (1, 2, 3) +------------------------+------------------------+---------------------------+-------------------- +null |false |null | null +; + // // SELECT with IN and table columns // @@ -64,4 +80,23 @@ SELECT 1 IN (1, abs(2 - 4), 3) OR emp_no NOT IN (10000, 10000 + 1, 10002) FROM t 10003 10004 10005 -; \ No newline at end of file +; + +inWithTableColumnAndNullHandling +SELECT emp_no, birth_date in (CAST('2018-10-01T00:00:00Z' AS TIMESTAMP), CAST('1959-10-01T00:00:00Z' AS TIMESTAMP)), birth_date in (CAST('2018-10-01T00:00:00Z' AS TIMESTAMP), null, CAST('1959-10-01T00:00:00Z' AS TIMESTAMP)) FROM test_emp WHERE emp_no = 10038 OR emp_no = 10039 OR emp_no = 10040 ORDER BY 1; + + emp_no | birth_date in (CAST('2018-10-01T00:00:00Z' AS TIMESTAMP), CAST('1959-10-01T00:00:00Z' AS TIMESTAMP)) | birth_date in (CAST('2018-10-01T00:00:00Z' AS TIMESTAMP), null, CAST('1959-10-01T00:00:00Z' AS TIMESTAMP)) +--------+-------------------------------------------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------ +10038 | true | true +10039 | null | null +10040 | false | null + + 
+inWithTableColumnAndNullHandlingAndNegation +SELECT emp_no, NOT birth_date in (CAST('2018-10-01T00:00:00Z' AS TIMESTAMP), CAST('1959-10-01T00:00:00Z' AS TIMESTAMP)), NOT birth_date in (CAST('2018-10-01T00:00:00Z' AS TIMESTAMP), null, CAST('1959-10-01T00:00:00Z' AS TIMESTAMP)) FROM test_emp WHERE emp_no = 10038 OR emp_no = 10039 OR emp_no = 10040 ORDER BY 1; + + emp_no | NOT birth_date in (CAST('2018-10-01T00:00:00Z' AS TIMESTAMP), CAST('1959-10-01T00:00:00Z' AS TIMESTAMP)) | NOT birth_date in (CAST('2018-10-01T00:00:00Z' AS TIMESTAMP), null, CAST('1959-10-01T00:00:00Z' AS TIMESTAMP)) +--------+-----------------------------------------------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------ +10038 | false | false +10039 | null | null +10040 | true | null \ No newline at end of file